Dataset columns: problem_id (string, 18-22 chars), source (string, 1 distinct value), task_type (string, 1 distinct value), in_source_id (string, 13-58 chars), prompt (string, 1.71k-18.9k chars), golden_diff (string, 145-5.13k chars), verification_info (string, 465-23.6k chars), num_tokens_prompt (int64, 556-4.1k), num_tokens_diff (int64, 47-1.02k).

problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info | num_tokens_prompt | num_tokens_diff
---|---|---|---|---|---|---|---|---|
gh_patches_debug_12988 | rasdani/github-patches | git_diff | elastic__ecs-1488 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`doc_values` parameter not set in Beats artifact
Certain fields have `index: false` and `doc_values: false` in their ECS definition, like `event.original`:
https://github.com/elastic/ecs/blob/master/schemas/event.yml#L577-L599
When `doc_values: false` is defined in the field definition, it's not being added to the maintained Beats fields YAML artifact:
https://github.com/elastic/ecs/blob/master/generated/beats/fields.ecs.yml#L1737-L1750
</issue>
<code>
[start of scripts/generators/beats.py]
1 from os.path import join
2 from collections import OrderedDict
3 from generators import ecs_helpers
4
5
6 def generate(ecs_nested, ecs_version, out_dir):
7 # Load temporary allowlist for default_fields workaround.
8 df_allowlist = ecs_helpers.yaml_load('scripts/generators/beats_default_fields_allowlist.yml')
9
10 # base first
11 beats_fields = fieldset_field_array(ecs_nested['base']['fields'], df_allowlist, ecs_nested['base']['prefix'])
12
13 allowed_fieldset_keys = ['name', 'title', 'group', 'description', 'footnote', 'type']
14 # other fieldsets
15 for fieldset_name in sorted(ecs_nested):
16 if 'base' == fieldset_name:
17 continue
18 fieldset = ecs_nested[fieldset_name]
19
20 # Handle when `root:true`
21 if fieldset.get('root', False):
22 beats_fields.extend(fieldset_field_array(fieldset['fields'], df_allowlist, fieldset['prefix']))
23 continue
24
25 beats_field = ecs_helpers.dict_copy_keys_ordered(fieldset, allowed_fieldset_keys)
26 beats_field['fields'] = fieldset_field_array(fieldset['fields'], df_allowlist, fieldset['prefix'])
27 beats_fields.append(beats_field)
28
29 beats_file = OrderedDict()
30 beats_file['key'] = 'ecs'
31 beats_file['title'] = 'ECS'
32 beats_file['description'] = 'ECS Fields.'
33 beats_file['fields'] = beats_fields
34
35 write_beats_yaml(beats_file, ecs_version, out_dir)
36
37
38 def fieldset_field_array(source_fields, df_allowlist, fieldset_prefix):
39 allowed_keys = ['name', 'level', 'required', 'type', 'object_type',
40 'ignore_above', 'multi_fields', 'format', 'input_format',
41 'output_format', 'output_precision', 'description',
42 'example', 'enabled', 'index', 'path', 'scaling_factor']
43 multi_fields_allowed_keys = ['name', 'type', 'norms', 'default_field', 'normalizer', 'ignore_above']
44
45 fields = []
46 for nested_field_name in source_fields:
47 ecs_field = source_fields[nested_field_name]
48 beats_field = ecs_helpers.dict_copy_keys_ordered(ecs_field, allowed_keys)
49 if '' == fieldset_prefix:
50 contextual_name = nested_field_name
51 else:
52 contextual_name = '.'.join(nested_field_name.split('.')[1:])
53
54 cleaned_multi_fields = []
55 if 'multi_fields' in ecs_field:
56 for mf in ecs_field['multi_fields']:
57 # Set default_field if necessary. Avoid adding the key if the parent
58 # field already is marked with default_field: false.
59 if not mf['flat_name'] in df_allowlist and ecs_field['flat_name'] in df_allowlist:
60 mf['default_field'] = False
61 cleaned_multi_fields.append(
62 ecs_helpers.dict_copy_keys_ordered(mf, multi_fields_allowed_keys))
63 beats_field['multi_fields'] = cleaned_multi_fields
64
65 beats_field['name'] = contextual_name
66
67 if not ecs_field['flat_name'] in df_allowlist:
68 beats_field['default_field'] = False
69
70 fields.append(beats_field)
71 return sorted(fields, key=lambda x: x['name'])
72
73 # Helpers
74
75
76 def write_beats_yaml(beats_file, ecs_version, out_dir):
77 ecs_helpers.make_dirs(join(out_dir, 'beats'))
78 warning = file_header().format(version=ecs_version)
79 ecs_helpers.yaml_dump(join(out_dir, 'beats/fields.ecs.yml'), [beats_file], preamble=warning)
80
81
82 # Templates
83
84
85 def file_header():
86 return """
87 # WARNING! Do not edit this file directly, it was generated by the ECS project,
88 # based on ECS version {version}.
89 # Please visit https://github.com/elastic/ecs to suggest changes to ECS fields.
90
91 """.lstrip()
92
[end of scripts/generators/beats.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/generators/beats.py b/scripts/generators/beats.py
--- a/scripts/generators/beats.py
+++ b/scripts/generators/beats.py
@@ -39,7 +39,8 @@
allowed_keys = ['name', 'level', 'required', 'type', 'object_type',
'ignore_above', 'multi_fields', 'format', 'input_format',
'output_format', 'output_precision', 'description',
- 'example', 'enabled', 'index', 'path', 'scaling_factor']
+ 'example', 'enabled', 'index', 'doc_values', 'path',
+ 'scaling_factor']
multi_fields_allowed_keys = ['name', 'type', 'norms', 'default_field', 'normalizer', 'ignore_above']
fields = []
| {"golden_diff": "diff --git a/scripts/generators/beats.py b/scripts/generators/beats.py\n--- a/scripts/generators/beats.py\n+++ b/scripts/generators/beats.py\n@@ -39,7 +39,8 @@\n allowed_keys = ['name', 'level', 'required', 'type', 'object_type',\n 'ignore_above', 'multi_fields', 'format', 'input_format',\n 'output_format', 'output_precision', 'description',\n- 'example', 'enabled', 'index', 'path', 'scaling_factor']\n+ 'example', 'enabled', 'index', 'doc_values', 'path',\n+ 'scaling_factor']\n multi_fields_allowed_keys = ['name', 'type', 'norms', 'default_field', 'normalizer', 'ignore_above']\n \n fields = []\n", "issue": "`doc_values` parameter not set in Beats artifact \nCertain fields have `index: false` and `doc_values: false` in their ECS definition, like `event.original`:\r\n\r\nhttps://github.com/elastic/ecs/blob/master/schemas/event.yml#L577-L599\r\n\r\nWhen `doc_values: false` is defined in the field definition, it's not being added to the maintained Beats fields YAML artifact:\r\n\r\nhttps://github.com/elastic/ecs/blob/master/generated/beats/fields.ecs.yml#L1737-L1750\n", "before_files": [{"content": "from os.path import join\nfrom collections import OrderedDict\nfrom generators import ecs_helpers\n\n\ndef generate(ecs_nested, ecs_version, out_dir):\n # Load temporary allowlist for default_fields workaround.\n df_allowlist = ecs_helpers.yaml_load('scripts/generators/beats_default_fields_allowlist.yml')\n\n # base first\n beats_fields = fieldset_field_array(ecs_nested['base']['fields'], df_allowlist, ecs_nested['base']['prefix'])\n\n allowed_fieldset_keys = ['name', 'title', 'group', 'description', 'footnote', 'type']\n # other fieldsets\n for fieldset_name in sorted(ecs_nested):\n if 'base' == fieldset_name:\n continue\n fieldset = ecs_nested[fieldset_name]\n\n # Handle when `root:true`\n if fieldset.get('root', False):\n beats_fields.extend(fieldset_field_array(fieldset['fields'], df_allowlist, fieldset['prefix']))\n continue\n\n beats_field = ecs_helpers.dict_copy_keys_ordered(fieldset, allowed_fieldset_keys)\n beats_field['fields'] = fieldset_field_array(fieldset['fields'], df_allowlist, fieldset['prefix'])\n beats_fields.append(beats_field)\n\n beats_file = OrderedDict()\n beats_file['key'] = 'ecs'\n beats_file['title'] = 'ECS'\n beats_file['description'] = 'ECS Fields.'\n beats_file['fields'] = beats_fields\n\n write_beats_yaml(beats_file, ecs_version, out_dir)\n\n\ndef fieldset_field_array(source_fields, df_allowlist, fieldset_prefix):\n allowed_keys = ['name', 'level', 'required', 'type', 'object_type',\n 'ignore_above', 'multi_fields', 'format', 'input_format',\n 'output_format', 'output_precision', 'description',\n 'example', 'enabled', 'index', 'path', 'scaling_factor']\n multi_fields_allowed_keys = ['name', 'type', 'norms', 'default_field', 'normalizer', 'ignore_above']\n\n fields = []\n for nested_field_name in source_fields:\n ecs_field = source_fields[nested_field_name]\n beats_field = ecs_helpers.dict_copy_keys_ordered(ecs_field, allowed_keys)\n if '' == fieldset_prefix:\n contextual_name = nested_field_name\n else:\n contextual_name = '.'.join(nested_field_name.split('.')[1:])\n\n cleaned_multi_fields = []\n if 'multi_fields' in ecs_field:\n for mf in ecs_field['multi_fields']:\n # Set default_field if necessary. 
Avoid adding the key if the parent\n # field already is marked with default_field: false.\n if not mf['flat_name'] in df_allowlist and ecs_field['flat_name'] in df_allowlist:\n mf['default_field'] = False\n cleaned_multi_fields.append(\n ecs_helpers.dict_copy_keys_ordered(mf, multi_fields_allowed_keys))\n beats_field['multi_fields'] = cleaned_multi_fields\n\n beats_field['name'] = contextual_name\n\n if not ecs_field['flat_name'] in df_allowlist:\n beats_field['default_field'] = False\n\n fields.append(beats_field)\n return sorted(fields, key=lambda x: x['name'])\n\n# Helpers\n\n\ndef write_beats_yaml(beats_file, ecs_version, out_dir):\n ecs_helpers.make_dirs(join(out_dir, 'beats'))\n warning = file_header().format(version=ecs_version)\n ecs_helpers.yaml_dump(join(out_dir, 'beats/fields.ecs.yml'), [beats_file], preamble=warning)\n\n\n# Templates\n\n\ndef file_header():\n return \"\"\"\n# WARNING! Do not edit this file directly, it was generated by the ECS project,\n# based on ECS version {version}.\n# Please visit https://github.com/elastic/ecs to suggest changes to ECS fields.\n\n\"\"\".lstrip()\n", "path": "scripts/generators/beats.py"}]} | 1,658 | 175 |
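To make the fix in the row above concrete: the golden diff only adds `'doc_values'` to the `allowed_keys` list, so that `ecs_helpers.dict_copy_keys_ordered` carries a field's `doc_values: false` setting through to the generated Beats YAML. A minimal sketch of that copy step (the helper below is a simplified stand-in for `ecs_helpers.dict_copy_keys_ordered`, and the sample field dict is invented for illustration, modelled on `event.original` from the issue):

```python
from collections import OrderedDict

def dict_copy_keys_ordered(field, allowed_keys):
    # Simplified stand-in: copy only the allowed keys, in allowed-key order.
    return OrderedDict((k, field[k]) for k in allowed_keys if k in field)

ecs_field = {"name": "event.original", "type": "keyword",
             "index": False, "doc_values": False}

old_keys = ["name", "type", "index"]                 # before the patch
new_keys = ["name", "type", "index", "doc_values"]   # after the patch

print(dict_copy_keys_ordered(ecs_field, old_keys))   # doc_values silently dropped
print(dict_copy_keys_ordered(ecs_field, new_keys))   # doc_values: False preserved
```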
gh_patches_debug_8516 | rasdani/github-patches | git_diff | iterative__dvc-10005 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
exp save: Short option for --message is -M, but for dvc exp run it is -m
It would be nice if the short options of `dvc exp run` and `dvc exp save` for specifying a commit message were identical. It would also be best to use the same option as `git commit`, i.e., `-m` instead of `-M`.
```
usage: dvc experiments save [-h] [-q | -v] [-f] [--json] [-n <name>] [-I <path>] [-M MESSAGE]
Save current workspace as an experiment.
Documentation: <https://man.dvc.org/exp/save>
options:
-h, --help show this help message and exit
-q, --quiet Be quiet.
-v, --verbose Be verbose.
-f, --force Replace experiment if it already exists.
--json Show output in JSON format.
-n <name>, --name <name>
Human-readable experiment name. If not specified, a name will be auto-generated.
-I <path>, --include-untracked <path>
List of untracked paths to include in the experiment.
-M MESSAGE, --message MESSAGE
Custom commit message to use when committing the experiment.
```
DVC CLI v3.22.1
</issue>
<code>
[start of dvc/commands/experiments/save.py]
1 import argparse
2 import logging
3
4 from dvc.cli.command import CmdBase
5 from dvc.cli.utils import append_doc_link
6 from dvc.exceptions import DvcException
7 from dvc.ui import ui
8
9 logger = logging.getLogger(__name__)
10
11
12 class CmdExperimentsSave(CmdBase):
13 def run(self):
14 try:
15 ref = self.repo.experiments.save(
16 name=self.args.name,
17 force=self.args.force,
18 include_untracked=self.args.include_untracked,
19 message=self.args.message,
20 )
21 except DvcException:
22 logger.exception("failed to save experiment")
23 return 1
24
25 if self.args.json:
26 ui.write_json({"ref": ref})
27 else:
28 name = self.repo.experiments.get_exact_name([ref])[ref]
29 ui.write(f"Experiment has been saved as: {name}")
30
31 return 0
32
33
34 def add_parser(experiments_subparsers, parent_parser):
35 EXPERIMENTS_SAVE_HELP = "Save current workspace as an experiment."
36 save_parser = experiments_subparsers.add_parser(
37 "save",
38 parents=[parent_parser],
39 description=append_doc_link(EXPERIMENTS_SAVE_HELP, "exp/save"),
40 help=EXPERIMENTS_SAVE_HELP,
41 formatter_class=argparse.RawDescriptionHelpFormatter,
42 )
43 save_parser.add_argument(
44 "-f",
45 "--force",
46 action="store_true",
47 default=False,
48 help="Replace experiment if it already exists.",
49 )
50 save_parser.add_argument(
51 "--json",
52 action="store_true",
53 default=False,
54 help="Show output in JSON format.",
55 )
56 save_parser.add_argument(
57 "-n",
58 "--name",
59 default=None,
60 help=(
61 "Human-readable experiment name. If not specified, a name will "
62 "be auto-generated."
63 ),
64 metavar="<name>",
65 )
66 save_parser.add_argument(
67 "-I",
68 "--include-untracked",
69 action="append",
70 default=[],
71 help="List of untracked paths to include in the experiment.",
72 metavar="<path>",
73 )
74 save_parser.add_argument(
75 "-M",
76 "--message",
77 type=str,
78 default=None,
79 help="Custom commit message to use when committing the experiment.",
80 )
81 save_parser.set_defaults(func=CmdExperimentsSave)
82
[end of dvc/commands/experiments/save.py]
[start of dvc/commands/experiments/exec_run.py]
1 import logging
2
3 from dvc.cli.command import CmdBaseNoRepo
4
5 logger = logging.getLogger(__name__)
6
7
8 class CmdExecutorRun(CmdBaseNoRepo):
9 """Run an experiment executor."""
10
11 def run(self):
12 from dvc.repo.experiments.executor.base import BaseExecutor, ExecutorInfo
13 from dvc.utils.serialize import load_json
14
15 info = ExecutorInfo.from_dict(load_json(self.args.infofile))
16 BaseExecutor.reproduce(
17 info=info,
18 rev="",
19 queue=None,
20 log_level=logger.getEffectiveLevel(),
21 infofile=self.args.infofile,
22 copy_paths=self.args.copy_paths,
23 message=self.args.message,
24 )
25 return 0
26
27
28 def add_parser(experiments_subparsers, parent_parser):
29 EXEC_RUN_HELP = "Run an experiment executor."
30 exec_run_parser = experiments_subparsers.add_parser(
31 "exec-run",
32 parents=[parent_parser],
33 description=EXEC_RUN_HELP,
34 add_help=False,
35 )
36 exec_run_parser.add_argument(
37 "--infofile",
38 help="Path to executor info file",
39 default=None,
40 )
41 exec_run_parser.add_argument(
42 "-C",
43 "--copy-paths",
44 action="append",
45 default=[],
46 help=(
47 "List of ignored or untracked paths to copy into the temp directory."
48 " Only used if `--temp` or `--queue` is specified."
49 ),
50 )
51 exec_run_parser.add_argument(
52 "-M",
53 "--message",
54 type=str,
55 default=None,
56 help="Custom commit message to use when committing the experiment.",
57 )
58 exec_run_parser.set_defaults(func=CmdExecutorRun)
59
[end of dvc/commands/experiments/exec_run.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dvc/commands/experiments/exec_run.py b/dvc/commands/experiments/exec_run.py
--- a/dvc/commands/experiments/exec_run.py
+++ b/dvc/commands/experiments/exec_run.py
@@ -49,7 +49,7 @@
),
)
exec_run_parser.add_argument(
- "-M",
+ "-m",
"--message",
type=str,
default=None,
diff --git a/dvc/commands/experiments/save.py b/dvc/commands/experiments/save.py
--- a/dvc/commands/experiments/save.py
+++ b/dvc/commands/experiments/save.py
@@ -72,7 +72,7 @@
metavar="<path>",
)
save_parser.add_argument(
- "-M",
+ "-m",
"--message",
type=str,
default=None,
| {"golden_diff": "diff --git a/dvc/commands/experiments/exec_run.py b/dvc/commands/experiments/exec_run.py\n--- a/dvc/commands/experiments/exec_run.py\n+++ b/dvc/commands/experiments/exec_run.py\n@@ -49,7 +49,7 @@\n ),\n )\n exec_run_parser.add_argument(\n- \"-M\",\n+ \"-m\",\n \"--message\",\n type=str,\n default=None,\ndiff --git a/dvc/commands/experiments/save.py b/dvc/commands/experiments/save.py\n--- a/dvc/commands/experiments/save.py\n+++ b/dvc/commands/experiments/save.py\n@@ -72,7 +72,7 @@\n metavar=\"<path>\",\n )\n save_parser.add_argument(\n- \"-M\",\n+ \"-m\",\n \"--message\",\n type=str,\n default=None,\n", "issue": "exp save: Short option for --message is -M, but for dvc exp run it is -m\nIt would be nice if the short options of `dvc exp run` and `dvc exp save` for specifying a commit message would be identical. Also, best to use the same options as one would use for `git commit`, i.e., `-m` instead of `-M`.\r\n\r\n```\r\nusage: dvc experiments save [-h] [-q | -v] [-f] [--json] [-n <name>] [-I <path>] [-M MESSAGE]\r\n\r\nSave current workspace as an experiment.\r\nDocumentation: <https://man.dvc.org/exp/save>\r\n\r\noptions:\r\n -h, --help show this help message and exit\r\n -q, --quiet Be quiet.\r\n -v, --verbose Be verbose.\r\n -f, --force Replace experiment if it already exists.\r\n --json Show output in JSON format.\r\n -n <name>, --name <name>\r\n Human-readable experiment name. If not specified, a name will be auto-generated.\r\n -I <path>, --include-untracked <path>\r\n List of untracked paths to include in the experiment.\r\n -M MESSAGE, --message MESSAGE\r\n Custom commit message to use when committing the experiment.\r\n```\r\n\r\nDVC CLI v3.22.1\n", "before_files": [{"content": "import argparse\nimport logging\n\nfrom dvc.cli.command import CmdBase\nfrom dvc.cli.utils import append_doc_link\nfrom dvc.exceptions import DvcException\nfrom dvc.ui import ui\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdExperimentsSave(CmdBase):\n def run(self):\n try:\n ref = self.repo.experiments.save(\n name=self.args.name,\n force=self.args.force,\n include_untracked=self.args.include_untracked,\n message=self.args.message,\n )\n except DvcException:\n logger.exception(\"failed to save experiment\")\n return 1\n\n if self.args.json:\n ui.write_json({\"ref\": ref})\n else:\n name = self.repo.experiments.get_exact_name([ref])[ref]\n ui.write(f\"Experiment has been saved as: {name}\")\n\n return 0\n\n\ndef add_parser(experiments_subparsers, parent_parser):\n EXPERIMENTS_SAVE_HELP = \"Save current workspace as an experiment.\"\n save_parser = experiments_subparsers.add_parser(\n \"save\",\n parents=[parent_parser],\n description=append_doc_link(EXPERIMENTS_SAVE_HELP, \"exp/save\"),\n help=EXPERIMENTS_SAVE_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n save_parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n default=False,\n help=\"Replace experiment if it already exists.\",\n )\n save_parser.add_argument(\n \"--json\",\n action=\"store_true\",\n default=False,\n help=\"Show output in JSON format.\",\n )\n save_parser.add_argument(\n \"-n\",\n \"--name\",\n default=None,\n help=(\n \"Human-readable experiment name. 
If not specified, a name will \"\n \"be auto-generated.\"\n ),\n metavar=\"<name>\",\n )\n save_parser.add_argument(\n \"-I\",\n \"--include-untracked\",\n action=\"append\",\n default=[],\n help=\"List of untracked paths to include in the experiment.\",\n metavar=\"<path>\",\n )\n save_parser.add_argument(\n \"-M\",\n \"--message\",\n type=str,\n default=None,\n help=\"Custom commit message to use when committing the experiment.\",\n )\n save_parser.set_defaults(func=CmdExperimentsSave)\n", "path": "dvc/commands/experiments/save.py"}, {"content": "import logging\n\nfrom dvc.cli.command import CmdBaseNoRepo\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdExecutorRun(CmdBaseNoRepo):\n \"\"\"Run an experiment executor.\"\"\"\n\n def run(self):\n from dvc.repo.experiments.executor.base import BaseExecutor, ExecutorInfo\n from dvc.utils.serialize import load_json\n\n info = ExecutorInfo.from_dict(load_json(self.args.infofile))\n BaseExecutor.reproduce(\n info=info,\n rev=\"\",\n queue=None,\n log_level=logger.getEffectiveLevel(),\n infofile=self.args.infofile,\n copy_paths=self.args.copy_paths,\n message=self.args.message,\n )\n return 0\n\n\ndef add_parser(experiments_subparsers, parent_parser):\n EXEC_RUN_HELP = \"Run an experiment executor.\"\n exec_run_parser = experiments_subparsers.add_parser(\n \"exec-run\",\n parents=[parent_parser],\n description=EXEC_RUN_HELP,\n add_help=False,\n )\n exec_run_parser.add_argument(\n \"--infofile\",\n help=\"Path to executor info file\",\n default=None,\n )\n exec_run_parser.add_argument(\n \"-C\",\n \"--copy-paths\",\n action=\"append\",\n default=[],\n help=(\n \"List of ignored or untracked paths to copy into the temp directory.\"\n \" Only used if `--temp` or `--queue` is specified.\"\n ),\n )\n exec_run_parser.add_argument(\n \"-M\",\n \"--message\",\n type=str,\n default=None,\n help=\"Custom commit message to use when committing the experiment.\",\n )\n exec_run_parser.set_defaults(func=CmdExecutorRun)\n", "path": "dvc/commands/experiments/exec_run.py"}]} | 1,947 | 184 |
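For readers comparing the diff above with the issue: the change is purely a rename of the short flag for `--message` from `-M` to `-m` in both argparse definitions, matching `git commit -m` and `dvc exp run -m`. A self-contained sketch of the patched option (the parser and prog name here are made up for illustration; the argument definition mirrors the patched code):

```python
import argparse

parser = argparse.ArgumentParser(prog="dvc experiments save")
parser.add_argument(
    "-m",  # previously "-M"
    "--message",
    type=str,
    default=None,
    help="Custom commit message to use when committing the experiment.",
)

args = parser.parse_args(["-m", "tune learning rate"])
print(args.message)  # -> "tune learning rate", same as using --message
```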
gh_patches_debug_22467 | rasdani/github-patches | git_diff | pre-commit__pre-commit-400 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Stashed changes lost if hook fails with non-UTF-8 diff containing trailing whitespace
Hi,
A colleague almost lost all the changes she was working on: she ran `git commit` (with no files staged) and `pre-commit` crashed without restoring its [patch](https://github.com/pre-commit/pre-commit/blob/master/pre_commit/staged_files_only.py#L15).
Here is the terminal message she got:
```
[WARNING] Stashed changes conflicted with hook auto-fixes... Rolling back fixes...
An unexpected error has occurred: CalledProcessError: Command: ['git', 'apply', 'C:\\Users\\toto\\.pre-commit\\patch1471341002']
```
This seems very similar to a past solved issue:
https://github.com/pre-commit/pre-commit/issues/176
I think it had to do with CRLF conversion.
I'm going to try to reproduce this.
</issue>
<code>
[start of pre_commit/staged_files_only.py]
1 from __future__ import unicode_literals
2
3 import contextlib
4 import io
5 import logging
6 import time
7
8 from pre_commit.util import CalledProcessError
9
10
11 logger = logging.getLogger('pre_commit')
12
13
14 @contextlib.contextmanager
15 def staged_files_only(cmd_runner):
16 """Clear any unstaged changes from the git working directory inside this
17 context.
18
19 Args:
20 cmd_runner - PrefixedCommandRunner
21 """
22 # Determine if there are unstaged files
23 retcode, diff_stdout_binary, _ = cmd_runner.run(
24 [
25 'git', 'diff', '--ignore-submodules', '--binary', '--exit-code',
26 '--no-color',
27 ],
28 retcode=None,
29 encoding=None,
30 )
31 if retcode and diff_stdout_binary.strip():
32 patch_filename = cmd_runner.path('patch{0}'.format(int(time.time())))
33 logger.warning('Unstaged files detected.')
34 logger.info(
35 'Stashing unstaged files to {0}.'.format(patch_filename),
36 )
37 # Save the current unstaged changes as a patch
38 with io.open(patch_filename, 'wb') as patch_file:
39 patch_file.write(diff_stdout_binary)
40
41 # Clear the working directory of unstaged changes
42 cmd_runner.run(['git', 'checkout', '--', '.'])
43 try:
44 yield
45 finally:
46 # Try to apply the patch we saved
47 try:
48 cmd_runner.run(['git', 'apply', patch_filename])
49 except CalledProcessError:
50 logger.warning(
51 'Stashed changes conflicted with hook auto-fixes... '
52 'Rolling back fixes...'
53 )
54 # We failed to apply the patch, presumably due to fixes made
55 # by hooks.
56 # Roll back the changes made by hooks.
57 cmd_runner.run(['git', 'checkout', '--', '.'])
58 cmd_runner.run(['git', 'apply', patch_filename])
59 logger.info('Restored changes from {0}.'.format(patch_filename))
60 else:
61 # There weren't any staged files so we don't need to do anything
62 # special
63 yield
64
[end of pre_commit/staged_files_only.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py
--- a/pre_commit/staged_files_only.py
+++ b/pre_commit/staged_files_only.py
@@ -45,7 +45,7 @@
finally:
# Try to apply the patch we saved
try:
- cmd_runner.run(['git', 'apply', patch_filename])
+ cmd_runner.run(('git', 'apply', patch_filename), encoding=None)
except CalledProcessError:
logger.warning(
'Stashed changes conflicted with hook auto-fixes... '
@@ -55,7 +55,7 @@
# by hooks.
# Roll back the changes made by hooks.
cmd_runner.run(['git', 'checkout', '--', '.'])
- cmd_runner.run(['git', 'apply', patch_filename])
+ cmd_runner.run(('git', 'apply', patch_filename), encoding=None)
logger.info('Restored changes from {0}.'.format(patch_filename))
else:
# There weren't any staged files so we don't need to do anything
| {"golden_diff": "diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py\n--- a/pre_commit/staged_files_only.py\n+++ b/pre_commit/staged_files_only.py\n@@ -45,7 +45,7 @@\n finally:\n # Try to apply the patch we saved\n try:\n- cmd_runner.run(['git', 'apply', patch_filename])\n+ cmd_runner.run(('git', 'apply', patch_filename), encoding=None)\n except CalledProcessError:\n logger.warning(\n 'Stashed changes conflicted with hook auto-fixes... '\n@@ -55,7 +55,7 @@\n # by hooks.\n # Roll back the changes made by hooks.\n cmd_runner.run(['git', 'checkout', '--', '.'])\n- cmd_runner.run(['git', 'apply', patch_filename])\n+ cmd_runner.run(('git', 'apply', patch_filename), encoding=None)\n logger.info('Restored changes from {0}.'.format(patch_filename))\n else:\n # There weren't any staged files so we don't need to do anything\n", "issue": "Stashed changes lost if hook fails with non-UTF-8 diff containing trailing whitespace\nHi,\n\nA colleague almost lost all the changes she was working on after launching a `git commit` (with zero file added) and `pre-commit` crashing without restoring its [patch](https://github.com/pre-commit/pre-commit/blob/master/pre_commit/staged_files_only.py#L15).\n\nHere is the terminal message she got:\n\n```\n[WARNING] Stashed changes conflicted with hook auto-fixes... Rolling back fixes...\nAn unexpected error has occurred: CalledProcessError: Command: ['git', 'apply', 'C:\\\\Users\\\\toto\\\\.pre-commit\\\\patch1471341002']\n```\n\nThis seems very similar to a past solved issue:\nhttps://github.com/pre-commit/pre-commit/issues/176\n\nI think it had to do with CRLF conversion.\nI'm going to try to reproduce this.\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport io\nimport logging\nimport time\n\nfrom pre_commit.util import CalledProcessError\n\n\nlogger = logging.getLogger('pre_commit')\n\n\[email protected]\ndef staged_files_only(cmd_runner):\n \"\"\"Clear any unstaged changes from the git working directory inside this\n context.\n\n Args:\n cmd_runner - PrefixedCommandRunner\n \"\"\"\n # Determine if there are unstaged files\n retcode, diff_stdout_binary, _ = cmd_runner.run(\n [\n 'git', 'diff', '--ignore-submodules', '--binary', '--exit-code',\n '--no-color',\n ],\n retcode=None,\n encoding=None,\n )\n if retcode and diff_stdout_binary.strip():\n patch_filename = cmd_runner.path('patch{0}'.format(int(time.time())))\n logger.warning('Unstaged files detected.')\n logger.info(\n 'Stashing unstaged files to {0}.'.format(patch_filename),\n )\n # Save the current unstaged changes as a patch\n with io.open(patch_filename, 'wb') as patch_file:\n patch_file.write(diff_stdout_binary)\n\n # Clear the working directory of unstaged changes\n cmd_runner.run(['git', 'checkout', '--', '.'])\n try:\n yield\n finally:\n # Try to apply the patch we saved\n try:\n cmd_runner.run(['git', 'apply', patch_filename])\n except CalledProcessError:\n logger.warning(\n 'Stashed changes conflicted with hook auto-fixes... '\n 'Rolling back fixes...'\n )\n # We failed to apply the patch, presumably due to fixes made\n # by hooks.\n # Roll back the changes made by hooks.\n cmd_runner.run(['git', 'checkout', '--', '.'])\n cmd_runner.run(['git', 'apply', patch_filename])\n logger.info('Restored changes from {0}.'.format(patch_filename))\n else:\n # There weren't any staged files so we don't need to do anything\n # special\n yield\n", "path": "pre_commit/staged_files_only.py"}]} | 1,291 | 231 |
gh_patches_debug_28280 | rasdani/github-patches | git_diff | sanic-org__sanic-2154 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow later websockets releases
**Describe the bug**
`websockets` is [pinned](https://github.com/sanic-org/sanic/blob/main/setup.py#L91). The latest `websockets` is 9.1, and that release fixes an [authentication vulnerability](https://websockets.readthedocs.io/en/stable/changelog.html) which was introduced in 8.0.
**Expected behavior**
Allow using `websockets>9`.
**Environment (please complete the following information):**
- OS: probably all
- Version: current
**Additional context**
n/a
</issue>
<code>
[start of sanic/websocket.py]
1 from typing import (
2 Any,
3 Awaitable,
4 Callable,
5 Dict,
6 List,
7 MutableMapping,
8 Optional,
9 Union,
10 )
11
12 from httptools import HttpParserUpgrade # type: ignore
13 from websockets import ( # type: ignore
14 ConnectionClosed,
15 InvalidHandshake,
16 WebSocketCommonProtocol,
17 handshake,
18 )
19
20 from sanic.exceptions import InvalidUsage
21 from sanic.server import HttpProtocol
22
23
24 __all__ = ["ConnectionClosed", "WebSocketProtocol", "WebSocketConnection"]
25
26 ASIMessage = MutableMapping[str, Any]
27
28
29 class WebSocketProtocol(HttpProtocol):
30 def __init__(
31 self,
32 *args,
33 websocket_timeout=10,
34 websocket_max_size=None,
35 websocket_max_queue=None,
36 websocket_read_limit=2 ** 16,
37 websocket_write_limit=2 ** 16,
38 websocket_ping_interval=20,
39 websocket_ping_timeout=20,
40 **kwargs
41 ):
42 super().__init__(*args, **kwargs)
43 self.websocket = None
44 # self.app = None
45 self.websocket_timeout = websocket_timeout
46 self.websocket_max_size = websocket_max_size
47 self.websocket_max_queue = websocket_max_queue
48 self.websocket_read_limit = websocket_read_limit
49 self.websocket_write_limit = websocket_write_limit
50 self.websocket_ping_interval = websocket_ping_interval
51 self.websocket_ping_timeout = websocket_ping_timeout
52
53 # timeouts make no sense for websocket routes
54 def request_timeout_callback(self):
55 if self.websocket is None:
56 super().request_timeout_callback()
57
58 def response_timeout_callback(self):
59 if self.websocket is None:
60 super().response_timeout_callback()
61
62 def keep_alive_timeout_callback(self):
63 if self.websocket is None:
64 super().keep_alive_timeout_callback()
65
66 def connection_lost(self, exc):
67 if self.websocket is not None:
68 self.websocket.connection_lost(exc)
69 super().connection_lost(exc)
70
71 def data_received(self, data):
72 if self.websocket is not None:
73 # pass the data to the websocket protocol
74 self.websocket.data_received(data)
75 else:
76 try:
77 super().data_received(data)
78 except HttpParserUpgrade:
79 # this is okay, it just indicates we've got an upgrade request
80 pass
81
82 def write_response(self, response):
83 if self.websocket is not None:
84 # websocket requests do not write a response
85 self.transport.close()
86 else:
87 super().write_response(response)
88
89 async def websocket_handshake(self, request, subprotocols=None):
90 # let the websockets package do the handshake with the client
91 headers = {}
92
93 try:
94 key = handshake.check_request(request.headers)
95 handshake.build_response(headers, key)
96 except InvalidHandshake:
97 raise InvalidUsage("Invalid websocket request")
98
99 subprotocol = None
100 if subprotocols and "Sec-Websocket-Protocol" in request.headers:
101 # select a subprotocol
102 client_subprotocols = [
103 p.strip()
104 for p in request.headers["Sec-Websocket-Protocol"].split(",")
105 ]
106 for p in client_subprotocols:
107 if p in subprotocols:
108 subprotocol = p
109 headers["Sec-Websocket-Protocol"] = subprotocol
110 break
111
112 # write the 101 response back to the client
113 rv = b"HTTP/1.1 101 Switching Protocols\r\n"
114 for k, v in headers.items():
115 rv += k.encode("utf-8") + b": " + v.encode("utf-8") + b"\r\n"
116 rv += b"\r\n"
117 request.transport.write(rv)
118
119 # hook up the websocket protocol
120 self.websocket = WebSocketCommonProtocol(
121 close_timeout=self.websocket_timeout,
122 max_size=self.websocket_max_size,
123 max_queue=self.websocket_max_queue,
124 read_limit=self.websocket_read_limit,
125 write_limit=self.websocket_write_limit,
126 ping_interval=self.websocket_ping_interval,
127 ping_timeout=self.websocket_ping_timeout,
128 )
129 # Following two lines are required for websockets 8.x
130 self.websocket.is_client = False
131 self.websocket.side = "server"
132 self.websocket.subprotocol = subprotocol
133 self.websocket.connection_made(request.transport)
134 self.websocket.connection_open()
135 return self.websocket
136
137
138 class WebSocketConnection:
139
140 # TODO
141 # - Implement ping/pong
142
143 def __init__(
144 self,
145 send: Callable[[ASIMessage], Awaitable[None]],
146 receive: Callable[[], Awaitable[ASIMessage]],
147 subprotocols: Optional[List[str]] = None,
148 ) -> None:
149 self._send = send
150 self._receive = receive
151 self.subprotocols = subprotocols or []
152
153 async def send(self, data: Union[str, bytes], *args, **kwargs) -> None:
154 message: Dict[str, Union[str, bytes]] = {"type": "websocket.send"}
155
156 if isinstance(data, bytes):
157 message.update({"bytes": data})
158 else:
159 message.update({"text": str(data)})
160
161 await self._send(message)
162
163 async def recv(self, *args, **kwargs) -> Optional[str]:
164 message = await self._receive()
165
166 if message["type"] == "websocket.receive":
167 return message["text"]
168 elif message["type"] == "websocket.disconnect":
169 pass
170
171 return None
172
173 receive = recv
174
175 async def accept(self) -> None:
176 await self._send(
177 {
178 "type": "websocket.accept",
179 "subprotocol": ",".join(list(self.subprotocols)),
180 }
181 )
182
183 async def close(self) -> None:
184 pass
185
[end of sanic/websocket.py]
[start of setup.py]
1 """
2 Sanic
3 """
4 import codecs
5 import os
6 import re
7 import sys
8
9 from distutils.util import strtobool
10
11 from setuptools import find_packages, setup
12 from setuptools.command.test import test as TestCommand
13
14
15 class PyTest(TestCommand):
16 """
17 Provide a Test runner to be used from setup.py to run unit tests
18 """
19
20 user_options = [("pytest-args=", "a", "Arguments to pass to pytest")]
21
22 def initialize_options(self):
23 TestCommand.initialize_options(self)
24 self.pytest_args = ""
25
26 def run_tests(self):
27 import shlex
28
29 import pytest
30
31 errno = pytest.main(shlex.split(self.pytest_args))
32 sys.exit(errno)
33
34
35 def open_local(paths, mode="r", encoding="utf8"):
36 path = os.path.join(os.path.abspath(os.path.dirname(__file__)), *paths)
37
38 return codecs.open(path, mode, encoding)
39
40
41 with open_local(["sanic", "__version__.py"], encoding="latin1") as fp:
42 try:
43 version = re.findall(
44 r"^__version__ = \"([^']+)\"\r?$", fp.read(), re.M
45 )[0]
46 except IndexError:
47 raise RuntimeError("Unable to determine version.")
48
49 with open_local(["README.rst"]) as rm:
50 long_description = rm.read()
51
52 setup_kwargs = {
53 "name": "sanic",
54 "version": version,
55 "url": "http://github.com/sanic-org/sanic/",
56 "license": "MIT",
57 "author": "Sanic Community",
58 "author_email": "[email protected]",
59 "description": (
60 "A web server and web framework that's written to go fast. "
61 "Build fast. Run fast."
62 ),
63 "long_description": long_description,
64 "packages": find_packages(),
65 "package_data": {"sanic": ["py.typed"]},
66 "platforms": "any",
67 "python_requires": ">=3.7",
68 "classifiers": [
69 "Development Status :: 4 - Beta",
70 "Environment :: Web Environment",
71 "License :: OSI Approved :: MIT License",
72 "Programming Language :: Python :: 3.7",
73 "Programming Language :: Python :: 3.8",
74 "Programming Language :: Python :: 3.9",
75 ],
76 "entry_points": {"console_scripts": ["sanic = sanic.__main__:main"]},
77 }
78
79 env_dependency = (
80 '; sys_platform != "win32" ' 'and implementation_name == "cpython"'
81 )
82 ujson = "ujson>=1.35" + env_dependency
83 uvloop = "uvloop>=0.5.3" + env_dependency
84
85 requirements = [
86 "sanic-routing>=0.6.0",
87 "httptools>=0.0.10",
88 uvloop,
89 ujson,
90 "aiofiles>=0.6.0",
91 "websockets>=8.1,<9.0",
92 "multidict>=5.0,<6.0",
93 ]
94
95 tests_require = [
96 "sanic-testing",
97 "pytest==5.2.1",
98 "multidict>=5.0,<6.0",
99 "gunicorn==20.0.4",
100 "pytest-cov",
101 "beautifulsoup4",
102 uvloop,
103 ujson,
104 "pytest-sanic",
105 "pytest-sugar",
106 "pytest-benchmark",
107 ]
108
109 docs_require = [
110 "sphinx>=2.1.2",
111 "sphinx_rtd_theme",
112 "recommonmark>=0.5.0",
113 "docutils",
114 "pygments",
115 ]
116
117 dev_require = tests_require + [
118 "aiofiles",
119 "tox",
120 "black",
121 "flake8",
122 "bandit",
123 "towncrier",
124 ]
125
126 all_require = dev_require + docs_require
127
128 if strtobool(os.environ.get("SANIC_NO_UJSON", "no")):
129 print("Installing without uJSON")
130 requirements.remove(ujson)
131 tests_require.remove(ujson)
132
133 # 'nt' means windows OS
134 if strtobool(os.environ.get("SANIC_NO_UVLOOP", "no")):
135 print("Installing without uvLoop")
136 requirements.remove(uvloop)
137 tests_require.remove(uvloop)
138
139 extras_require = {
140 "test": tests_require,
141 "dev": dev_require,
142 "docs": docs_require,
143 "all": all_require,
144 }
145
146 setup_kwargs["install_requires"] = requirements
147 setup_kwargs["tests_require"] = tests_require
148 setup_kwargs["extras_require"] = extras_require
149 setup_kwargs["cmdclass"] = {"test": PyTest}
150 setup(**setup_kwargs)
151
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sanic/websocket.py b/sanic/websocket.py
--- a/sanic/websocket.py
+++ b/sanic/websocket.py
@@ -14,9 +14,13 @@
ConnectionClosed,
InvalidHandshake,
WebSocketCommonProtocol,
- handshake,
)
+# Despite the "legacy" namespace, the primary maintainer of websockets
+# committed to maintaining backwards-compatibility until 2026 and will
+# consider extending it if sanic continues depending on this module.
+from websockets.legacy import handshake
+
from sanic.exceptions import InvalidUsage
from sanic.server import HttpProtocol
@@ -126,7 +130,9 @@
ping_interval=self.websocket_ping_interval,
ping_timeout=self.websocket_ping_timeout,
)
- # Following two lines are required for websockets 8.x
+ # we use WebSocketCommonProtocol because we don't want the handshake
+ # logic from WebSocketServerProtocol; however, we must tell it that
+ # we're running on the server side
self.websocket.is_client = False
self.websocket.side = "server"
self.websocket.subprotocol = subprotocol
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -88,12 +88,12 @@
uvloop,
ujson,
"aiofiles>=0.6.0",
- "websockets>=8.1,<9.0",
+ "websockets>=9.0",
"multidict>=5.0,<6.0",
]
tests_require = [
- "sanic-testing",
+ "sanic-testing>=0.6.0",
"pytest==5.2.1",
"multidict>=5.0,<6.0",
"gunicorn==20.0.4",
| {"golden_diff": "diff --git a/sanic/websocket.py b/sanic/websocket.py\n--- a/sanic/websocket.py\n+++ b/sanic/websocket.py\n@@ -14,9 +14,13 @@\n ConnectionClosed,\n InvalidHandshake,\n WebSocketCommonProtocol,\n- handshake,\n )\n \n+# Despite the \"legacy\" namespace, the primary maintainer of websockets\n+# committed to maintaining backwards-compatibility until 2026 and will\n+# consider extending it if sanic continues depending on this module.\n+from websockets.legacy import handshake\n+\n from sanic.exceptions import InvalidUsage\n from sanic.server import HttpProtocol\n \n@@ -126,7 +130,9 @@\n ping_interval=self.websocket_ping_interval,\n ping_timeout=self.websocket_ping_timeout,\n )\n- # Following two lines are required for websockets 8.x\n+ # we use WebSocketCommonProtocol because we don't want the handshake\n+ # logic from WebSocketServerProtocol; however, we must tell it that\n+ # we're running on the server side\n self.websocket.is_client = False\n self.websocket.side = \"server\"\n self.websocket.subprotocol = subprotocol\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -88,12 +88,12 @@\n uvloop,\n ujson,\n \"aiofiles>=0.6.0\",\n- \"websockets>=8.1,<9.0\",\n+ \"websockets>=9.0\",\n \"multidict>=5.0,<6.0\",\n ]\n \n tests_require = [\n- \"sanic-testing\",\n+ \"sanic-testing>=0.6.0\",\n \"pytest==5.2.1\",\n \"multidict>=5.0,<6.0\",\n \"gunicorn==20.0.4\",\n", "issue": "Allow later websockets releases\n**Describe the bug**\r\n`websockets` is [pinned](https://github.com/sanic-org/sanic/blob/main/setup.py#L91\r\n). The latest `websockets` is 9.1 and this release is fixing a [authentication vulnerability](https://websockets.readthedocs.io/en/stable/changelog.html) which was introduced with 8.0.\r\n\r\n**Expected behavior**\r\nAllow to use `websockets>9`\r\n\r\n**Environment (please complete the following information):**\r\n - OS: probably all\r\n - Version: current\r\n\r\n**Additional context**\r\nn/a\n", "before_files": [{"content": "from typing import (\n Any,\n Awaitable,\n Callable,\n Dict,\n List,\n MutableMapping,\n Optional,\n Union,\n)\n\nfrom httptools import HttpParserUpgrade # type: ignore\nfrom websockets import ( # type: ignore\n ConnectionClosed,\n InvalidHandshake,\n WebSocketCommonProtocol,\n handshake,\n)\n\nfrom sanic.exceptions import InvalidUsage\nfrom sanic.server import HttpProtocol\n\n\n__all__ = [\"ConnectionClosed\", \"WebSocketProtocol\", \"WebSocketConnection\"]\n\nASIMessage = MutableMapping[str, Any]\n\n\nclass WebSocketProtocol(HttpProtocol):\n def __init__(\n self,\n *args,\n websocket_timeout=10,\n websocket_max_size=None,\n websocket_max_queue=None,\n websocket_read_limit=2 ** 16,\n websocket_write_limit=2 ** 16,\n websocket_ping_interval=20,\n websocket_ping_timeout=20,\n **kwargs\n ):\n super().__init__(*args, **kwargs)\n self.websocket = None\n # self.app = None\n self.websocket_timeout = websocket_timeout\n self.websocket_max_size = websocket_max_size\n self.websocket_max_queue = websocket_max_queue\n self.websocket_read_limit = websocket_read_limit\n self.websocket_write_limit = websocket_write_limit\n self.websocket_ping_interval = websocket_ping_interval\n self.websocket_ping_timeout = websocket_ping_timeout\n\n # timeouts make no sense for websocket routes\n def request_timeout_callback(self):\n if self.websocket is None:\n super().request_timeout_callback()\n\n def response_timeout_callback(self):\n if self.websocket is None:\n super().response_timeout_callback()\n\n def keep_alive_timeout_callback(self):\n if 
self.websocket is None:\n super().keep_alive_timeout_callback()\n\n def connection_lost(self, exc):\n if self.websocket is not None:\n self.websocket.connection_lost(exc)\n super().connection_lost(exc)\n\n def data_received(self, data):\n if self.websocket is not None:\n # pass the data to the websocket protocol\n self.websocket.data_received(data)\n else:\n try:\n super().data_received(data)\n except HttpParserUpgrade:\n # this is okay, it just indicates we've got an upgrade request\n pass\n\n def write_response(self, response):\n if self.websocket is not None:\n # websocket requests do not write a response\n self.transport.close()\n else:\n super().write_response(response)\n\n async def websocket_handshake(self, request, subprotocols=None):\n # let the websockets package do the handshake with the client\n headers = {}\n\n try:\n key = handshake.check_request(request.headers)\n handshake.build_response(headers, key)\n except InvalidHandshake:\n raise InvalidUsage(\"Invalid websocket request\")\n\n subprotocol = None\n if subprotocols and \"Sec-Websocket-Protocol\" in request.headers:\n # select a subprotocol\n client_subprotocols = [\n p.strip()\n for p in request.headers[\"Sec-Websocket-Protocol\"].split(\",\")\n ]\n for p in client_subprotocols:\n if p in subprotocols:\n subprotocol = p\n headers[\"Sec-Websocket-Protocol\"] = subprotocol\n break\n\n # write the 101 response back to the client\n rv = b\"HTTP/1.1 101 Switching Protocols\\r\\n\"\n for k, v in headers.items():\n rv += k.encode(\"utf-8\") + b\": \" + v.encode(\"utf-8\") + b\"\\r\\n\"\n rv += b\"\\r\\n\"\n request.transport.write(rv)\n\n # hook up the websocket protocol\n self.websocket = WebSocketCommonProtocol(\n close_timeout=self.websocket_timeout,\n max_size=self.websocket_max_size,\n max_queue=self.websocket_max_queue,\n read_limit=self.websocket_read_limit,\n write_limit=self.websocket_write_limit,\n ping_interval=self.websocket_ping_interval,\n ping_timeout=self.websocket_ping_timeout,\n )\n # Following two lines are required for websockets 8.x\n self.websocket.is_client = False\n self.websocket.side = \"server\"\n self.websocket.subprotocol = subprotocol\n self.websocket.connection_made(request.transport)\n self.websocket.connection_open()\n return self.websocket\n\n\nclass WebSocketConnection:\n\n # TODO\n # - Implement ping/pong\n\n def __init__(\n self,\n send: Callable[[ASIMessage], Awaitable[None]],\n receive: Callable[[], Awaitable[ASIMessage]],\n subprotocols: Optional[List[str]] = None,\n ) -> None:\n self._send = send\n self._receive = receive\n self.subprotocols = subprotocols or []\n\n async def send(self, data: Union[str, bytes], *args, **kwargs) -> None:\n message: Dict[str, Union[str, bytes]] = {\"type\": \"websocket.send\"}\n\n if isinstance(data, bytes):\n message.update({\"bytes\": data})\n else:\n message.update({\"text\": str(data)})\n\n await self._send(message)\n\n async def recv(self, *args, **kwargs) -> Optional[str]:\n message = await self._receive()\n\n if message[\"type\"] == \"websocket.receive\":\n return message[\"text\"]\n elif message[\"type\"] == \"websocket.disconnect\":\n pass\n\n return None\n\n receive = recv\n\n async def accept(self) -> None:\n await self._send(\n {\n \"type\": \"websocket.accept\",\n \"subprotocol\": \",\".join(list(self.subprotocols)),\n }\n )\n\n async def close(self) -> None:\n pass\n", "path": "sanic/websocket.py"}, {"content": "\"\"\"\nSanic\n\"\"\"\nimport codecs\nimport os\nimport re\nimport sys\n\nfrom distutils.util import strtobool\n\nfrom setuptools 
import find_packages, setup\nfrom setuptools.command.test import test as TestCommand\n\n\nclass PyTest(TestCommand):\n \"\"\"\n Provide a Test runner to be used from setup.py to run unit tests\n \"\"\"\n\n user_options = [(\"pytest-args=\", \"a\", \"Arguments to pass to pytest\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = \"\"\n\n def run_tests(self):\n import shlex\n\n import pytest\n\n errno = pytest.main(shlex.split(self.pytest_args))\n sys.exit(errno)\n\n\ndef open_local(paths, mode=\"r\", encoding=\"utf8\"):\n path = os.path.join(os.path.abspath(os.path.dirname(__file__)), *paths)\n\n return codecs.open(path, mode, encoding)\n\n\nwith open_local([\"sanic\", \"__version__.py\"], encoding=\"latin1\") as fp:\n try:\n version = re.findall(\n r\"^__version__ = \\\"([^']+)\\\"\\r?$\", fp.read(), re.M\n )[0]\n except IndexError:\n raise RuntimeError(\"Unable to determine version.\")\n\nwith open_local([\"README.rst\"]) as rm:\n long_description = rm.read()\n\nsetup_kwargs = {\n \"name\": \"sanic\",\n \"version\": version,\n \"url\": \"http://github.com/sanic-org/sanic/\",\n \"license\": \"MIT\",\n \"author\": \"Sanic Community\",\n \"author_email\": \"[email protected]\",\n \"description\": (\n \"A web server and web framework that's written to go fast. \"\n \"Build fast. Run fast.\"\n ),\n \"long_description\": long_description,\n \"packages\": find_packages(),\n \"package_data\": {\"sanic\": [\"py.typed\"]},\n \"platforms\": \"any\",\n \"python_requires\": \">=3.7\",\n \"classifiers\": [\n \"Development Status :: 4 - Beta\",\n \"Environment :: Web Environment\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n \"entry_points\": {\"console_scripts\": [\"sanic = sanic.__main__:main\"]},\n}\n\nenv_dependency = (\n '; sys_platform != \"win32\" ' 'and implementation_name == \"cpython\"'\n)\nujson = \"ujson>=1.35\" + env_dependency\nuvloop = \"uvloop>=0.5.3\" + env_dependency\n\nrequirements = [\n \"sanic-routing>=0.6.0\",\n \"httptools>=0.0.10\",\n uvloop,\n ujson,\n \"aiofiles>=0.6.0\",\n \"websockets>=8.1,<9.0\",\n \"multidict>=5.0,<6.0\",\n]\n\ntests_require = [\n \"sanic-testing\",\n \"pytest==5.2.1\",\n \"multidict>=5.0,<6.0\",\n \"gunicorn==20.0.4\",\n \"pytest-cov\",\n \"beautifulsoup4\",\n uvloop,\n ujson,\n \"pytest-sanic\",\n \"pytest-sugar\",\n \"pytest-benchmark\",\n]\n\ndocs_require = [\n \"sphinx>=2.1.2\",\n \"sphinx_rtd_theme\",\n \"recommonmark>=0.5.0\",\n \"docutils\",\n \"pygments\",\n]\n\ndev_require = tests_require + [\n \"aiofiles\",\n \"tox\",\n \"black\",\n \"flake8\",\n \"bandit\",\n \"towncrier\",\n]\n\nall_require = dev_require + docs_require\n\nif strtobool(os.environ.get(\"SANIC_NO_UJSON\", \"no\")):\n print(\"Installing without uJSON\")\n requirements.remove(ujson)\n tests_require.remove(ujson)\n\n# 'nt' means windows OS\nif strtobool(os.environ.get(\"SANIC_NO_UVLOOP\", \"no\")):\n print(\"Installing without uvLoop\")\n requirements.remove(uvloop)\n tests_require.remove(uvloop)\n\nextras_require = {\n \"test\": tests_require,\n \"dev\": dev_require,\n \"docs\": docs_require,\n \"all\": all_require,\n}\n\nsetup_kwargs[\"install_requires\"] = requirements\nsetup_kwargs[\"tests_require\"] = tests_require\nsetup_kwargs[\"extras_require\"] = extras_require\nsetup_kwargs[\"cmdclass\"] = {\"test\": PyTest}\nsetup(**setup_kwargs)\n", "path": "setup.py"}]} | 3,677 | 406 |
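The row above pairs two changes: `setup.py` drops the `<9.0` pin (now `websockets>=9.0`), and the handshake helpers are imported from `websockets.legacy`, the namespace where websockets 9.x keeps the old implementation. If a codebase needed to support both the 8.x and 9.x layouts during a transition, an import fallback along these lines could work (the try/except is my illustration, not part of the actual patch, which imports unconditionally from `websockets.legacy`):

```python
try:
    # websockets >= 9.0: the classic handshake helpers live under "legacy".
    from websockets.legacy import handshake
except ImportError:
    # websockets 8.x exposed the module at the top level.
    from websockets import handshake  # type: ignore

# Either way the module provides the same helpers used by the protocol class,
# e.g. handshake.check_request() and handshake.build_response().
```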
gh_patches_debug_65236 | rasdani/github-patches | git_diff | streamlink__streamlink-5698 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plugins.btv: No playable streams found
### Checklist
- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Your Streamlink version (6.4.2+1.g7e722ec1) is up to date!
### Description
The plugin does not play video; it fails with the errors shown in the debug log below.
### Debug log
```text
streamlink --loglevel=debug "https://btvplus.bg/live/" best
[cli][debug] OS: Linux-6.2.0-35-generic-x86_64-with-glibc2.35
[cli][debug] Python: 3.10.12
[cli][debug] OpenSSL: OpenSSL 3.0.2 15 Mar 2022
[cli][debug] Streamlink: 6.4.2+1.g7e722ec1
[cli][debug] Dependencies:
[cli][debug] certifi: 2023.5.7
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.8.0
[cli][debug] pycountry: 20.7.3
[cli][debug] pycryptodome: 3.17
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.31.0
[cli][debug] trio: 0.22.2
[cli][debug] trio-websocket: 0.10.3
[cli][debug] typing-extensions: 4.7.1
[cli][debug] urllib3: 1.26.16
[cli][debug] websocket-client: 1.2.3
[cli][debug] Arguments:
[cli][debug] url=https://btvplus.bg/live/
[cli][debug] stream=['best']
[cli][debug] --loglevel=debug
[cli][info] Found matching plugin btv for URL https://btvplus.bg/live/
[cli][info] Available streams: live (worst, best)
[cli][info] Opening stream: live (hls)
[cli][info] Starting player: /usr/bin/vlc
[stream.hls][debug] Reloading playlist
[cli][debug] Pre-buffering 8192 bytes
[stream.hls][error] Attempted to play a variant playlist, use 'hls://https://cdn.bweb.bg/live/PhRBlmfjy0uVGxaj1_BMiw/1701627017/61065646.m3u8' instead
[stream.segmented][debug] Closing worker thread
[stream.segmented][debug] Closing writer thread
[cli][error] Try 1/1: Could not open stream <HLSStream ['hls', 'https://cdn.bweb.bg/live/PhRBlmfjy0uVGxaj1_BMiw/1701627017/61065646.m3u8']> (No data returned from stream)
error: Could not open stream <HLSStream ['hls', 'https://cdn.bweb.bg/live/PhRBlmfjy0uVGxaj1_BMiw/1701627017/61065646.m3u8']>, tried 1 times, exiting
[cli][info] Closing currently open stream...
```
</issue>
<code>
[start of src/streamlink/plugins/btv.py]
1 """
2 $description A privately owned Bulgarian live TV channel.
3 $url btvplus.bg
4 $type live
5 $region Bulgaria
6 """
7
8 import logging
9 import re
10
11 from streamlink.plugin import Plugin, pluginmatcher
12 from streamlink.plugin.api import validate
13 from streamlink.stream.hls import HLSStream
14
15
16 log = logging.getLogger(__name__)
17
18
19 @pluginmatcher(re.compile(
20 r"https?://(?:www\.)?btvplus\.bg/live/?",
21 ))
22 class BTV(Plugin):
23 URL_API = "https://btvplus.bg/lbin/v3/btvplus/player_config.php"
24
25 def _get_streams(self):
26 media_id = self.session.http.get(self.url, schema=validate.Schema(
27 re.compile(r"media_id=(\d+)"),
28 validate.any(None, validate.get(1)),
29 ))
30 if media_id is None:
31 return
32
33 stream_url = self.session.http.get(
34 self.URL_API,
35 params={
36 "media_id": media_id,
37 },
38 schema=validate.Schema(
39 validate.any(
40 validate.all(
41 validate.regex(re.compile(r"geo_blocked_stream")),
42 validate.get(0),
43 ),
44 validate.all(
45 validate.parse_json(),
46 {
47 "status": "ok",
48 "info": {
49 "file": validate.url(path=validate.endswith(".m3u8")),
50 },
51 },
52 validate.get(("info", "file")),
53 ),
54 ),
55 ),
56 )
57 if not stream_url:
58 return
59
60 if stream_url == "geo_blocked_stream":
61 log.error("The content is not available in your region")
62 return
63
64 return {"live": HLSStream(self.session, stream_url)}
65
66
67 __plugin__ = BTV
68
[end of src/streamlink/plugins/btv.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/btv.py b/src/streamlink/plugins/btv.py
--- a/src/streamlink/plugins/btv.py
+++ b/src/streamlink/plugins/btv.py
@@ -61,7 +61,7 @@
log.error("The content is not available in your region")
return
- return {"live": HLSStream(self.session, stream_url)}
+ return HLSStream.parse_variant_playlist(self.session, stream_url)
__plugin__ = BTV
| {"golden_diff": "diff --git a/src/streamlink/plugins/btv.py b/src/streamlink/plugins/btv.py\n--- a/src/streamlink/plugins/btv.py\n+++ b/src/streamlink/plugins/btv.py\n@@ -61,7 +61,7 @@\n log.error(\"The content is not available in your region\")\n return\n \n- return {\"live\": HLSStream(self.session, stream_url)}\n+ return HLSStream.parse_variant_playlist(self.session, stream_url)\n \n \n __plugin__ = BTV\n", "issue": "plugins.btv: No playable streams found\n### Checklist\n\n- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nYour Streamlink version (6.4.2+1.g7e722ec1) is up to date!\n\n### Description\n\nThe plug-in does not display video. It displays errors shown in the logs below.\r\n\n\n### Debug log\n\n```text\nstreamlink --loglevel=debug \"https://btvplus.bg/live/\" best\r\n[cli][debug] OS: Linux-6.2.0-35-generic-x86_64-with-glibc2.35\r\n[cli][debug] Python: 3.10.12\r\n[cli][debug] OpenSSL: OpenSSL 3.0.2 15 Mar 2022\r\n[cli][debug] Streamlink: 6.4.2+1.g7e722ec1\r\n[cli][debug] Dependencies:\r\n[cli][debug] certifi: 2023.5.7\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.8.0\r\n[cli][debug] pycountry: 20.7.3\r\n[cli][debug] pycryptodome: 3.17\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.31.0\r\n[cli][debug] trio: 0.22.2\r\n[cli][debug] trio-websocket: 0.10.3\r\n[cli][debug] typing-extensions: 4.7.1\r\n[cli][debug] urllib3: 1.26.16\r\n[cli][debug] websocket-client: 1.2.3\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://btvplus.bg/live/\r\n[cli][debug] stream=['best']\r\n[cli][debug] --loglevel=debug\r\n[cli][info] Found matching plugin btv for URL https://btvplus.bg/live/\r\n[cli][info] Available streams: live (worst, best)\r\n[cli][info] Opening stream: live (hls)\r\n[cli][info] Starting player: /usr/bin/vlc\r\n[stream.hls][debug] Reloading playlist\r\n[cli][debug] Pre-buffering 8192 bytes\r\n[stream.hls][error] Attempted to play a variant playlist, use 'hls://https://cdn.bweb.bg/live/PhRBlmfjy0uVGxaj1_BMiw/1701627017/61065646.m3u8' instead\r\n[stream.segmented][debug] Closing worker thread\r\n[stream.segmented][debug] Closing writer thread\r\n[cli][error] Try 1/1: Could not open stream <HLSStream ['hls', 'https://cdn.bweb.bg/live/PhRBlmfjy0uVGxaj1_BMiw/1701627017/61065646.m3u8']> (No data returned from stream)\r\nerror: Could not open stream <HLSStream ['hls', 'https://cdn.bweb.bg/live/PhRBlmfjy0uVGxaj1_BMiw/1701627017/61065646.m3u8']>, tried 1 times, exiting\r\n[cli][info] Closing currently open stream...\n```\n\n", "before_files": [{"content": "\"\"\"\n$description A privately owned Bulgarian live TV channel.\n$url btvplus.bg\n$type live\n$region Bulgaria\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?btvplus\\.bg/live/?\",\n))\nclass BTV(Plugin):\n URL_API = 
\"https://btvplus.bg/lbin/v3/btvplus/player_config.php\"\n\n def _get_streams(self):\n media_id = self.session.http.get(self.url, schema=validate.Schema(\n re.compile(r\"media_id=(\\d+)\"),\n validate.any(None, validate.get(1)),\n ))\n if media_id is None:\n return\n\n stream_url = self.session.http.get(\n self.URL_API,\n params={\n \"media_id\": media_id,\n },\n schema=validate.Schema(\n validate.any(\n validate.all(\n validate.regex(re.compile(r\"geo_blocked_stream\")),\n validate.get(0),\n ),\n validate.all(\n validate.parse_json(),\n {\n \"status\": \"ok\",\n \"info\": {\n \"file\": validate.url(path=validate.endswith(\".m3u8\")),\n },\n },\n validate.get((\"info\", \"file\")),\n ),\n ),\n ),\n )\n if not stream_url:\n return\n\n if stream_url == \"geo_blocked_stream\":\n log.error(\"The content is not available in your region\")\n return\n\n return {\"live\": HLSStream(self.session, stream_url)}\n\n\n__plugin__ = BTV\n", "path": "src/streamlink/plugins/btv.py"}]} | 1,973 | 104 |
gh_patches_debug_7467 | rasdani/github-patches | git_diff | sublimelsp__LSP-660 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
when cancelling the symbols panel, the last symbol is selected
https://github.com/tomv564/LSP/blob/be904c56fddf35f724486de405a168786ed4ffeb/plugin/symbols.py#L82-L92
```diff
def on_symbol_selected(self, symbol_index):
+ if symbol_index == -1:
+ return
selected_symbol = self.symbols[symbol_index]
range = selected_symbol.get('location', selected_symbol.get('range'))
range = range.get('range', range)
```
</issue>
<code>
[start of plugin/symbols.py]
1 from .core.logging import debug
2 from .core.protocol import Request, Range
3 from .core.protocol import SymbolKind
4 from .core.registry import client_for_view, LspTextCommand
5 from .core.url import filename_to_uri
6 from .core.views import range_to_region
7
8 try:
9 from typing import List, Optional, Any
10 assert List and Optional and Any
11 except ImportError:
12 pass
13
14 symbol_kind_names = {
15 SymbolKind.File: "file",
16 SymbolKind.Module: "module",
17 SymbolKind.Namespace: "namespace",
18 SymbolKind.Package: "package",
19 SymbolKind.Class: "class",
20 SymbolKind.Method: "method",
21 SymbolKind.Property: "property",
22 SymbolKind.Field: "field",
23 SymbolKind.Constructor: "constructor",
24 SymbolKind.Enum: "enum",
25 SymbolKind.Interface: "interface",
26 SymbolKind.Function: "function",
27 SymbolKind.Variable: "variable",
28 SymbolKind.Constant: "constant",
29 SymbolKind.String: "string",
30 SymbolKind.Number: "number",
31 SymbolKind.Boolean: "boolean",
32 SymbolKind.Array: "array",
33 SymbolKind.Object: "object",
34 SymbolKind.Key: "key",
35 SymbolKind.Null: "null",
36 SymbolKind.EnumMember: "enum member",
37 SymbolKind.Struct: "struct",
38 SymbolKind.Event: "event",
39 SymbolKind.Operator: "operator",
40 SymbolKind.TypeParameter: "type parameter"
41 }
42
43
44 def format_symbol_kind(kind):
45 return symbol_kind_names.get(kind, str(kind))
46
47
48 def format_symbol(item):
49 """
50 items may be a list of strings, or a list of string lists.
51 In the latter case, each entry in the quick panel will show multiple rows
52 """
53 prefix = item.get("containerName", "")
54 label = prefix + "." + item.get("name") if prefix else item.get("name")
55 return [label, format_symbol_kind(item.get("kind"))]
56
57
58 class LspDocumentSymbolsCommand(LspTextCommand):
59 def __init__(self, view):
60 super().__init__(view)
61
62 def is_enabled(self, event=None):
63 return self.has_client_with_capability('documentSymbolProvider')
64
65 def run(self, edit) -> None:
66 client = client_for_view(self.view)
67 if client:
68 params = {
69 "textDocument": {
70 "uri": filename_to_uri(self.view.file_name())
71 }
72 }
73 request = Request.documentSymbols(params)
74 client.send_request(request, self.handle_response)
75
76 def handle_response(self, response: 'Optional[List]') -> None:
77 response_list = response or []
78 symbols = list(format_symbol(item) for item in response_list)
79 self.symbols = response_list
80 self.view.window().show_quick_panel(symbols, self.on_symbol_selected)
81
82 def on_symbol_selected(self, symbol_index):
83 selected_symbol = self.symbols[symbol_index]
84 range = selected_symbol.get('location', selected_symbol.get('range'))
85 range = range.get('range', range)
86 if not range:
87 debug('could not recognize the type: expected either SymbolInformation or DocumentSymbol')
88 return
89 region = range_to_region(Range.from_lsp(range), self.view)
90 self.view.show_at_center(region)
91 self.view.sel().clear()
92 self.view.sel().add(region)
93
[end of plugin/symbols.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugin/symbols.py b/plugin/symbols.py
--- a/plugin/symbols.py
+++ b/plugin/symbols.py
@@ -80,6 +80,8 @@
self.view.window().show_quick_panel(symbols, self.on_symbol_selected)
def on_symbol_selected(self, symbol_index):
+ if symbol_index == -1:
+ return
selected_symbol = self.symbols[symbol_index]
range = selected_symbol.get('location', selected_symbol.get('range'))
range = range.get('range', range)
| {"golden_diff": "diff --git a/plugin/symbols.py b/plugin/symbols.py\n--- a/plugin/symbols.py\n+++ b/plugin/symbols.py\n@@ -80,6 +80,8 @@\n self.view.window().show_quick_panel(symbols, self.on_symbol_selected)\n \n def on_symbol_selected(self, symbol_index):\n+ if symbol_index == -1:\n+ return\n selected_symbol = self.symbols[symbol_index]\n range = selected_symbol.get('location', selected_symbol.get('range'))\n range = range.get('range', range)\n", "issue": "when cancelling the symbols panel, the last symbol is selected\nhttps://github.com/tomv564/LSP/blob/be904c56fddf35f724486de405a168786ed4ffeb/plugin/symbols.py#L82-L92\r\n```diff\r\n def on_symbol_selected(self, symbol_index):\r\n+ if symbol_index == -1:\r\n+ return\r\n selected_symbol = self.symbols[symbol_index]\r\n range = selected_symbol.get('location', selected_symbol.get('range'))\r\n range = range.get('range', range)\r\n```\n", "before_files": [{"content": "from .core.logging import debug\nfrom .core.protocol import Request, Range\nfrom .core.protocol import SymbolKind\nfrom .core.registry import client_for_view, LspTextCommand\nfrom .core.url import filename_to_uri\nfrom .core.views import range_to_region\n\ntry:\n from typing import List, Optional, Any\n assert List and Optional and Any\nexcept ImportError:\n pass\n\nsymbol_kind_names = {\n SymbolKind.File: \"file\",\n SymbolKind.Module: \"module\",\n SymbolKind.Namespace: \"namespace\",\n SymbolKind.Package: \"package\",\n SymbolKind.Class: \"class\",\n SymbolKind.Method: \"method\",\n SymbolKind.Property: \"property\",\n SymbolKind.Field: \"field\",\n SymbolKind.Constructor: \"constructor\",\n SymbolKind.Enum: \"enum\",\n SymbolKind.Interface: \"interface\",\n SymbolKind.Function: \"function\",\n SymbolKind.Variable: \"variable\",\n SymbolKind.Constant: \"constant\",\n SymbolKind.String: \"string\",\n SymbolKind.Number: \"number\",\n SymbolKind.Boolean: \"boolean\",\n SymbolKind.Array: \"array\",\n SymbolKind.Object: \"object\",\n SymbolKind.Key: \"key\",\n SymbolKind.Null: \"null\",\n SymbolKind.EnumMember: \"enum member\",\n SymbolKind.Struct: \"struct\",\n SymbolKind.Event: \"event\",\n SymbolKind.Operator: \"operator\",\n SymbolKind.TypeParameter: \"type parameter\"\n}\n\n\ndef format_symbol_kind(kind):\n return symbol_kind_names.get(kind, str(kind))\n\n\ndef format_symbol(item):\n \"\"\"\n items may be a list of strings, or a list of string lists.\n In the latter case, each entry in the quick panel will show multiple rows\n \"\"\"\n prefix = item.get(\"containerName\", \"\")\n label = prefix + \".\" + item.get(\"name\") if prefix else item.get(\"name\")\n return [label, format_symbol_kind(item.get(\"kind\"))]\n\n\nclass LspDocumentSymbolsCommand(LspTextCommand):\n def __init__(self, view):\n super().__init__(view)\n\n def is_enabled(self, event=None):\n return self.has_client_with_capability('documentSymbolProvider')\n\n def run(self, edit) -> None:\n client = client_for_view(self.view)\n if client:\n params = {\n \"textDocument\": {\n \"uri\": filename_to_uri(self.view.file_name())\n }\n }\n request = Request.documentSymbols(params)\n client.send_request(request, self.handle_response)\n\n def handle_response(self, response: 'Optional[List]') -> None:\n response_list = response or []\n symbols = list(format_symbol(item) for item in response_list)\n self.symbols = response_list\n self.view.window().show_quick_panel(symbols, self.on_symbol_selected)\n\n def on_symbol_selected(self, symbol_index):\n selected_symbol = self.symbols[symbol_index]\n range = 
selected_symbol.get('location', selected_symbol.get('range'))\n range = range.get('range', range)\n if not range:\n debug('could not recognize the type: expected either SymbolInformation or DocumentSymbol')\n return\n region = range_to_region(Range.from_lsp(range), self.view)\n self.view.show_at_center(region)\n self.view.sel().clear()\n self.view.sel().add(region)\n", "path": "plugin/symbols.py"}]} | 1,545 | 116 |
gh_patches_debug_7466 | rasdani/github-patches | git_diff | matrix-org__synapse-15961 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Build packages for Debian Trixie
Please can we publish packages to the apt repository for [Debian Trixie (13)](https://wiki.debian.org/DebianTrixie) which is the current testing release at the time of writing. It became the current testing release on 2023-06-10.
I run Debian testing on the server I run Synapse on, and the change from bookworm to trixie means that I now get errors on `apt update`:
```
E: The repository 'https://packages.matrix.org/debian trixie Release' does not have a Release file.
```
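For reference, the build script keeps a hard-coded tuple of distributions to build for, so supporting a new release is essentially a one-line addition to that tuple (see `scripts-dev/build_debian_packages.py` below). A minimal sketch of the shape of the change:

```python
# Sketch: add Debian testing's current codename to the known distributions.
DISTS = (
    # ...existing bullseye/bookworm/sid and Ubuntu entries...
    "debian:trixie",  # Debian 13, current testing (EOL not specified yet)
)
```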
</issue>
<code>
[start of scripts-dev/build_debian_packages.py]
1 #!/usr/bin/env python3
2
3 # Build the Debian packages using Docker images.
4 #
5 # This script builds the Docker images and then executes them sequentially, each
6 # one building a Debian package for the targeted operating system. It is
7 # designed to be a "single command" to produce all the images.
8 #
9 # By default, builds for all known distributions, but a list of distributions
10 # can be passed on the commandline for debugging.
11
12 import argparse
13 import json
14 import os
15 import signal
16 import subprocess
17 import sys
18 import threading
19 from concurrent.futures import ThreadPoolExecutor
20 from types import FrameType
21 from typing import Collection, Optional, Sequence, Set
22
23 # These are expanded inside the dockerfile to be a fully qualified image name.
24 # e.g. docker.io/library/debian:bullseye
25 #
26 # If an EOL is forced by a Python version and we're dropping support for it, make sure
27 # to remove references to the distibution across Synapse (search for "bullseye" for
28 # example)
29 DISTS = (
30 "debian:bullseye", # (EOL ~2024-07) (our EOL forced by Python 3.9 is 2025-10-05)
31 "debian:bookworm", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
32 "debian:sid", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
33 "ubuntu:focal", # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14)
34 "ubuntu:jammy", # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)
35 "ubuntu:kinetic", # 22.10 (EOL 2023-07-20) (our EOL forced by Python 3.10 is 2026-10-04)
36 "ubuntu:lunar", # 23.04 (EOL 2024-01) (our EOL forced by Python 3.11 is 2027-10-24)
37 )
38
39 DESC = """\
40 Builds .debs for synapse, using a Docker image for the build environment.
41
42 By default, builds for all known distributions, but a list of distributions
43 can be passed on the commandline for debugging.
44 """
45
46 projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
47
48
49 class Builder(object):
50 def __init__(
51 self,
52 redirect_stdout: bool = False,
53 docker_build_args: Optional[Sequence[str]] = None,
54 ):
55 self.redirect_stdout = redirect_stdout
56 self._docker_build_args = tuple(docker_build_args or ())
57 self.active_containers: Set[str] = set()
58 self._lock = threading.Lock()
59 self._failed = False
60
61 def run_build(self, dist: str, skip_tests: bool = False) -> None:
62 """Build deb for a single distribution"""
63
64 if self._failed:
65 print("not building %s due to earlier failure" % (dist,))
66 raise Exception("failed")
67
68 try:
69 self._inner_build(dist, skip_tests)
70 except Exception as e:
71 print("build of %s failed: %s" % (dist, e), file=sys.stderr)
72 self._failed = True
73 raise
74
75 def _inner_build(self, dist: str, skip_tests: bool = False) -> None:
76 tag = dist.split(":", 1)[1]
77
78 # Make the dir where the debs will live.
79 #
80 # Note that we deliberately put this outside the source tree, otherwise
81 # we tend to get source packages which are full of debs. (We could hack
82 # around that with more magic in the build_debian.sh script, but that
83 # doesn't solve the problem for natively-run dpkg-buildpakage).
84 debsdir = os.path.join(projdir, "../debs")
85 os.makedirs(debsdir, exist_ok=True)
86
87 if self.redirect_stdout:
88 logfile = os.path.join(debsdir, "%s.buildlog" % (tag,))
89 print("building %s: directing output to %s" % (dist, logfile))
90 stdout = open(logfile, "w")
91 else:
92 stdout = None
93
94 # first build a docker image for the build environment
95 build_args = (
96 (
97 "docker",
98 "build",
99 "--tag",
100 "dh-venv-builder:" + tag,
101 "--build-arg",
102 "distro=" + dist,
103 "-f",
104 "docker/Dockerfile-dhvirtualenv",
105 )
106 + self._docker_build_args
107 + ("docker",)
108 )
109
110 subprocess.check_call(
111 build_args,
112 stdout=stdout,
113 stderr=subprocess.STDOUT,
114 cwd=projdir,
115 )
116
117 container_name = "synapse_build_" + tag
118 with self._lock:
119 self.active_containers.add(container_name)
120
121 # then run the build itself
122 subprocess.check_call(
123 [
124 "docker",
125 "run",
126 "--rm",
127 "--name",
128 container_name,
129 "--volume=" + projdir + ":/synapse/source:ro",
130 "--volume=" + debsdir + ":/debs",
131 "-e",
132 "TARGET_USERID=%i" % (os.getuid(),),
133 "-e",
134 "TARGET_GROUPID=%i" % (os.getgid(),),
135 "-e",
136 "DEB_BUILD_OPTIONS=%s" % ("nocheck" if skip_tests else ""),
137 "dh-venv-builder:" + tag,
138 ],
139 stdout=stdout,
140 stderr=subprocess.STDOUT,
141 )
142
143 with self._lock:
144 self.active_containers.remove(container_name)
145
146 if stdout is not None:
147 stdout.close()
148 print("Completed build of %s" % (dist,))
149
150 def kill_containers(self) -> None:
151 with self._lock:
152 active = list(self.active_containers)
153
154 for c in active:
155 print("killing container %s" % (c,))
156 subprocess.run(
157 [
158 "docker",
159 "kill",
160 c,
161 ],
162 stdout=subprocess.DEVNULL,
163 )
164 with self._lock:
165 self.active_containers.remove(c)
166
167
168 def run_builds(
169 builder: Builder, dists: Collection[str], jobs: int = 1, skip_tests: bool = False
170 ) -> None:
171 def sig(signum: int, _frame: Optional[FrameType]) -> None:
172 print("Caught SIGINT")
173 builder.kill_containers()
174
175 signal.signal(signal.SIGINT, sig)
176
177 with ThreadPoolExecutor(max_workers=jobs) as e:
178 res = e.map(lambda dist: builder.run_build(dist, skip_tests), dists)
179
180 # make sure we consume the iterable so that exceptions are raised.
181 for _ in res:
182 pass
183
184
185 if __name__ == "__main__":
186 parser = argparse.ArgumentParser(
187 description=DESC,
188 )
189 parser.add_argument(
190 "-j",
191 "--jobs",
192 type=int,
193 default=1,
194 help="specify the number of builds to run in parallel",
195 )
196 parser.add_argument(
197 "--no-check",
198 action="store_true",
199 help="skip running tests after building",
200 )
201 parser.add_argument(
202 "--docker-build-arg",
203 action="append",
204 help="specify an argument to pass to docker build",
205 )
206 parser.add_argument(
207 "--show-dists-json",
208 action="store_true",
209 help="instead of building the packages, just list the dists to build for, as a json array",
210 )
211 parser.add_argument(
212 "dist",
213 nargs="*",
214 default=DISTS,
215 help="a list of distributions to build for. Default: %(default)s",
216 )
217 args = parser.parse_args()
218 if args.show_dists_json:
219 print(json.dumps(DISTS))
220 else:
221 builder = Builder(
222 redirect_stdout=(args.jobs > 1), docker_build_args=args.docker_build_arg
223 )
224 run_builds(
225 builder,
226 dists=args.dist,
227 jobs=args.jobs,
228 skip_tests=args.no_check,
229 )
230
[end of scripts-dev/build_debian_packages.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py
--- a/scripts-dev/build_debian_packages.py
+++ b/scripts-dev/build_debian_packages.py
@@ -34,6 +34,7 @@
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)
"ubuntu:kinetic", # 22.10 (EOL 2023-07-20) (our EOL forced by Python 3.10 is 2026-10-04)
"ubuntu:lunar", # 23.04 (EOL 2024-01) (our EOL forced by Python 3.11 is 2027-10-24)
+ "debian:trixie", # (EOL not specified yet)
)
DESC = """\
| {"golden_diff": "diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py\n--- a/scripts-dev/build_debian_packages.py\n+++ b/scripts-dev/build_debian_packages.py\n@@ -34,6 +34,7 @@\n \"ubuntu:jammy\", # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)\n \"ubuntu:kinetic\", # 22.10 (EOL 2023-07-20) (our EOL forced by Python 3.10 is 2026-10-04)\n \"ubuntu:lunar\", # 23.04 (EOL 2024-01) (our EOL forced by Python 3.11 is 2027-10-24)\n+ \"debian:trixie\", # (EOL not specified yet)\n )\n \n DESC = \"\"\"\\\n", "issue": "Build packages for Debian Trixie\nPlease can we publish packages to the apt repository for [Debian Trixie (13)](https://wiki.debian.org/DebianTrixie) which is the current testing release at the time of writing. It became the current testing release on 2023-06-10.\r\n\r\nI run debian testing on the server I run synapse on and the change from bookworm to trixie has meant that I now get errors on `apt update`:\r\n\r\n```\r\nE: The repository 'https://packages.matrix.org/debian trixie Release' does not have a Release file.\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Build the Debian packages using Docker images.\n#\n# This script builds the Docker images and then executes them sequentially, each\n# one building a Debian package for the targeted operating system. It is\n# designed to be a \"single command\" to produce all the images.\n#\n# By default, builds for all known distributions, but a list of distributions\n# can be passed on the commandline for debugging.\n\nimport argparse\nimport json\nimport os\nimport signal\nimport subprocess\nimport sys\nimport threading\nfrom concurrent.futures import ThreadPoolExecutor\nfrom types import FrameType\nfrom typing import Collection, Optional, Sequence, Set\n\n# These are expanded inside the dockerfile to be a fully qualified image name.\n# e.g. 
docker.io/library/debian:bullseye\n#\n# If an EOL is forced by a Python version and we're dropping support for it, make sure\n# to remove references to the distibution across Synapse (search for \"bullseye\" for\n# example)\nDISTS = (\n \"debian:bullseye\", # (EOL ~2024-07) (our EOL forced by Python 3.9 is 2025-10-05)\n \"debian:bookworm\", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)\n \"debian:sid\", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)\n \"ubuntu:focal\", # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14)\n \"ubuntu:jammy\", # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)\n \"ubuntu:kinetic\", # 22.10 (EOL 2023-07-20) (our EOL forced by Python 3.10 is 2026-10-04)\n \"ubuntu:lunar\", # 23.04 (EOL 2024-01) (our EOL forced by Python 3.11 is 2027-10-24)\n)\n\nDESC = \"\"\"\\\nBuilds .debs for synapse, using a Docker image for the build environment.\n\nBy default, builds for all known distributions, but a list of distributions\ncan be passed on the commandline for debugging.\n\"\"\"\n\nprojdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n\n\nclass Builder(object):\n def __init__(\n self,\n redirect_stdout: bool = False,\n docker_build_args: Optional[Sequence[str]] = None,\n ):\n self.redirect_stdout = redirect_stdout\n self._docker_build_args = tuple(docker_build_args or ())\n self.active_containers: Set[str] = set()\n self._lock = threading.Lock()\n self._failed = False\n\n def run_build(self, dist: str, skip_tests: bool = False) -> None:\n \"\"\"Build deb for a single distribution\"\"\"\n\n if self._failed:\n print(\"not building %s due to earlier failure\" % (dist,))\n raise Exception(\"failed\")\n\n try:\n self._inner_build(dist, skip_tests)\n except Exception as e:\n print(\"build of %s failed: %s\" % (dist, e), file=sys.stderr)\n self._failed = True\n raise\n\n def _inner_build(self, dist: str, skip_tests: bool = False) -> None:\n tag = dist.split(\":\", 1)[1]\n\n # Make the dir where the debs will live.\n #\n # Note that we deliberately put this outside the source tree, otherwise\n # we tend to get source packages which are full of debs. 
(We could hack\n # around that with more magic in the build_debian.sh script, but that\n # doesn't solve the problem for natively-run dpkg-buildpakage).\n debsdir = os.path.join(projdir, \"../debs\")\n os.makedirs(debsdir, exist_ok=True)\n\n if self.redirect_stdout:\n logfile = os.path.join(debsdir, \"%s.buildlog\" % (tag,))\n print(\"building %s: directing output to %s\" % (dist, logfile))\n stdout = open(logfile, \"w\")\n else:\n stdout = None\n\n # first build a docker image for the build environment\n build_args = (\n (\n \"docker\",\n \"build\",\n \"--tag\",\n \"dh-venv-builder:\" + tag,\n \"--build-arg\",\n \"distro=\" + dist,\n \"-f\",\n \"docker/Dockerfile-dhvirtualenv\",\n )\n + self._docker_build_args\n + (\"docker\",)\n )\n\n subprocess.check_call(\n build_args,\n stdout=stdout,\n stderr=subprocess.STDOUT,\n cwd=projdir,\n )\n\n container_name = \"synapse_build_\" + tag\n with self._lock:\n self.active_containers.add(container_name)\n\n # then run the build itself\n subprocess.check_call(\n [\n \"docker\",\n \"run\",\n \"--rm\",\n \"--name\",\n container_name,\n \"--volume=\" + projdir + \":/synapse/source:ro\",\n \"--volume=\" + debsdir + \":/debs\",\n \"-e\",\n \"TARGET_USERID=%i\" % (os.getuid(),),\n \"-e\",\n \"TARGET_GROUPID=%i\" % (os.getgid(),),\n \"-e\",\n \"DEB_BUILD_OPTIONS=%s\" % (\"nocheck\" if skip_tests else \"\"),\n \"dh-venv-builder:\" + tag,\n ],\n stdout=stdout,\n stderr=subprocess.STDOUT,\n )\n\n with self._lock:\n self.active_containers.remove(container_name)\n\n if stdout is not None:\n stdout.close()\n print(\"Completed build of %s\" % (dist,))\n\n def kill_containers(self) -> None:\n with self._lock:\n active = list(self.active_containers)\n\n for c in active:\n print(\"killing container %s\" % (c,))\n subprocess.run(\n [\n \"docker\",\n \"kill\",\n c,\n ],\n stdout=subprocess.DEVNULL,\n )\n with self._lock:\n self.active_containers.remove(c)\n\n\ndef run_builds(\n builder: Builder, dists: Collection[str], jobs: int = 1, skip_tests: bool = False\n) -> None:\n def sig(signum: int, _frame: Optional[FrameType]) -> None:\n print(\"Caught SIGINT\")\n builder.kill_containers()\n\n signal.signal(signal.SIGINT, sig)\n\n with ThreadPoolExecutor(max_workers=jobs) as e:\n res = e.map(lambda dist: builder.run_build(dist, skip_tests), dists)\n\n # make sure we consume the iterable so that exceptions are raised.\n for _ in res:\n pass\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=DESC,\n )\n parser.add_argument(\n \"-j\",\n \"--jobs\",\n type=int,\n default=1,\n help=\"specify the number of builds to run in parallel\",\n )\n parser.add_argument(\n \"--no-check\",\n action=\"store_true\",\n help=\"skip running tests after building\",\n )\n parser.add_argument(\n \"--docker-build-arg\",\n action=\"append\",\n help=\"specify an argument to pass to docker build\",\n )\n parser.add_argument(\n \"--show-dists-json\",\n action=\"store_true\",\n help=\"instead of building the packages, just list the dists to build for, as a json array\",\n )\n parser.add_argument(\n \"dist\",\n nargs=\"*\",\n default=DISTS,\n help=\"a list of distributions to build for. Default: %(default)s\",\n )\n args = parser.parse_args()\n if args.show_dists_json:\n print(json.dumps(DISTS))\n else:\n builder = Builder(\n redirect_stdout=(args.jobs > 1), docker_build_args=args.docker_build_arg\n )\n run_builds(\n builder,\n dists=args.dist,\n jobs=args.jobs,\n skip_tests=args.no_check,\n )\n", "path": "scripts-dev/build_debian_packages.py"}]} | 3,131 | 235 |
gh_patches_debug_21902 | rasdani/github-patches | git_diff | pypa__pip-4035 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pip on windows
Prerequisites:
Windows
python 2.7
setuptools 3.6
easy_install
pip
Problem:
We have a requirements file that contains the "pyinstaller" package dependency. I suppose that it somehow depends on cryptography, which in turn depends on the cffi package, and that's why it tries to install that as well. For now we specify this dependency in the following way:
pyinstaller==2.1.1dev-89e99dd # from requirements file.
Next we try to install our set of requirements with
pip -f {our additional private repository} -U --egg -r {requirements file name}
From the attached build log screenshot you can see the following output (build_log attachment):
[12:59:39]: [Step 7/15] Installed c:\users\administrator\envs\python27-32bit\lib\site-packages\cryptography-0.3-py2.7-win32.egg
[12:59:39]: [Step 7/15]
[12:59:39]: [Step 7/15] error: c:\buildagent\temp\buildtmp\easy_install-snsbak\cryptography-0.3\cffi-0.8.2-py2.7-win32.egg_cffi_backend.pyd: Access is denied
I've done some investigation and attached a screenshot of the related file activity with the important items highlighted (file_activity_list attachment). It seems that Python cannot delete this _cffi_backend.pyd file. Further investigation revealed that the file creation mode does allow deleting this file (file_open_result attachment). From the event properties (event_properties attachment) I see that all related modules appear to be loaded using the LoadLibrary Windows API, which is how it is supposed to be done with Python C extensions if we want to use this code. But it appears to me that someone forgot to unload all these modules before trying to delete them, and that's why the file cannot be deleted. Please refer to the _cffi_backend_loading attachment - it proves _cffi_backend.pyd is being loaded as a library (WinDbg screen). The very next screenshot shows the state of the pip install command when WinDbg broke on the module load - _break_python_screen attachment. I also hit the same problem if I just parse this requirements file with pkg_tools and pass these requirements to setup's install_requires argument - setup develop fails with the same result. To me this problem relates more to the easy_install/setuptools core. One more thing worth mentioning - without the '--egg' argument the problem doesn't reproduce.
[Attached screenshots: build_log, file_activity_list, file_open_result, event_properties, _cffi_backend_loading, _break_python_screen - images not reproduced here.]
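In short: on Windows, a loaded DLL/.pyd stays locked for as long as it is mapped into the running process, so any tool that has already imported a C extension cannot overwrite or delete that extension during an upgrade. The patch later in this record takes the corresponding precaution inside pip itself: it comments out the optional imports in pip's vendored `requests` (the PyOpenSSL/urllib3 injection and the `simplejson` fallback) so that pip never loads those compiled modules and can therefore upgrade them on Windows. The pattern being disabled looks roughly like this:

```python
# Sketch of the optional-import pattern that the patch comments out in the vendored
# requests: importing the C-accelerated module here would keep its .pyd mapped,
# which blocks upgrading that very package on Windows.
try:
    import simplejson as json  # compiled extension, optional speedup
except ImportError:
    import json  # pure-Python standard-library fallback
```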
</issue>
<code>
[start of pip/_vendor/requests/__init__.py]
1 # -*- coding: utf-8 -*-
2
3 # __
4 # /__) _ _ _ _ _/ _
5 # / ( (- (/ (/ (- _) / _)
6 # /
7
8 """
9 Requests HTTP library
10 ~~~~~~~~~~~~~~~~~~~~~
11
12 Requests is an HTTP library, written in Python, for human beings. Basic GET
13 usage:
14
15 >>> import requests
16 >>> r = requests.get('https://www.python.org')
17 >>> r.status_code
18 200
19 >>> 'Python is a programming language' in r.content
20 True
21
22 ... or POST:
23
24 >>> payload = dict(key1='value1', key2='value2')
25 >>> r = requests.post('http://httpbin.org/post', data=payload)
26 >>> print(r.text)
27 {
28 ...
29 "form": {
30 "key2": "value2",
31 "key1": "value1"
32 },
33 ...
34 }
35
36 The other HTTP methods are supported - see `requests.api`. Full documentation
37 is at <http://python-requests.org>.
38
39 :copyright: (c) 2016 by Kenneth Reitz.
40 :license: Apache 2.0, see LICENSE for more details.
41
42 """
43
44 __title__ = 'requests'
45 __version__ = '2.10.0'
46 __build__ = 0x021000
47 __author__ = 'Kenneth Reitz'
48 __license__ = 'Apache 2.0'
49 __copyright__ = 'Copyright 2016 Kenneth Reitz'
50
51 # Attempt to enable urllib3's SNI support, if possible
52 try:
53 from .packages.urllib3.contrib import pyopenssl
54 pyopenssl.inject_into_urllib3()
55 except ImportError:
56 pass
57
58 import warnings
59
60 # urllib3's DependencyWarnings should be silenced.
61 from .packages.urllib3.exceptions import DependencyWarning
62 warnings.simplefilter('ignore', DependencyWarning)
63
64 from . import utils
65 from .models import Request, Response, PreparedRequest
66 from .api import request, get, head, post, patch, put, delete, options
67 from .sessions import session, Session
68 from .status_codes import codes
69 from .exceptions import (
70 RequestException, Timeout, URLRequired,
71 TooManyRedirects, HTTPError, ConnectionError,
72 FileModeWarning, ConnectTimeout, ReadTimeout
73 )
74
75 # Set default logging handler to avoid "No handler found" warnings.
76 import logging
77 try: # Python 2.7+
78 from logging import NullHandler
79 except ImportError:
80 class NullHandler(logging.Handler):
81 def emit(self, record):
82 pass
83
84 logging.getLogger(__name__).addHandler(NullHandler())
85
86 import warnings
87
88 # FileModeWarnings go off per the default.
89 warnings.simplefilter('default', FileModeWarning, append=True)
90
[end of pip/_vendor/requests/__init__.py]
[start of pip/_vendor/requests/compat.py]
1 # -*- coding: utf-8 -*-
2
3 """
4 pythoncompat
5 """
6
7 from .packages import chardet
8
9 import sys
10
11 # -------
12 # Pythons
13 # -------
14
15 # Syntax sugar.
16 _ver = sys.version_info
17
18 #: Python 2.x?
19 is_py2 = (_ver[0] == 2)
20
21 #: Python 3.x?
22 is_py3 = (_ver[0] == 3)
23
24 try:
25 import simplejson as json
26 except (ImportError, SyntaxError):
27 # simplejson does not support Python 3.2, it throws a SyntaxError
28 # because of u'...' Unicode literals.
29 import json
30
31 # ---------
32 # Specifics
33 # ---------
34
35 if is_py2:
36 from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
37 from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
38 from urllib2 import parse_http_list
39 import cookielib
40 from Cookie import Morsel
41 from StringIO import StringIO
42 from .packages.urllib3.packages.ordered_dict import OrderedDict
43
44 builtin_str = str
45 bytes = str
46 str = unicode
47 basestring = basestring
48 numeric_types = (int, long, float)
49
50 elif is_py3:
51 from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
52 from urllib.request import parse_http_list, getproxies, proxy_bypass
53 from http import cookiejar as cookielib
54 from http.cookies import Morsel
55 from io import StringIO
56 from collections import OrderedDict
57
58 builtin_str = str
59 str = str
60 bytes = bytes
61 basestring = (str, bytes)
62 numeric_types = (int, float)
63
[end of pip/_vendor/requests/compat.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pip/_vendor/requests/__init__.py b/pip/_vendor/requests/__init__.py
--- a/pip/_vendor/requests/__init__.py
+++ b/pip/_vendor/requests/__init__.py
@@ -49,11 +49,13 @@
__copyright__ = 'Copyright 2016 Kenneth Reitz'
# Attempt to enable urllib3's SNI support, if possible
-try:
- from .packages.urllib3.contrib import pyopenssl
- pyopenssl.inject_into_urllib3()
-except ImportError:
- pass
+# Note: Patched by pip to prevent using the PyOpenSSL module. On Windows this
+# prevents upgrading cryptography.
+# try:
+# from .packages.urllib3.contrib import pyopenssl
+# pyopenssl.inject_into_urllib3()
+# except ImportError:
+# pass
import warnings
diff --git a/pip/_vendor/requests/compat.py b/pip/_vendor/requests/compat.py
--- a/pip/_vendor/requests/compat.py
+++ b/pip/_vendor/requests/compat.py
@@ -21,12 +21,14 @@
#: Python 3.x?
is_py3 = (_ver[0] == 3)
-try:
- import simplejson as json
-except (ImportError, SyntaxError):
- # simplejson does not support Python 3.2, it throws a SyntaxError
- # because of u'...' Unicode literals.
- import json
+# Note: We've patched out simplejson support in pip because it prevents
+# upgrading simplejson on Windows.
+# try:
+# import simplejson as json
+# except (ImportError, SyntaxError):
+# # simplejson does not support Python 3.2, it throws a SyntaxError
+# # because of u'...' Unicode literals.
+import json
# ---------
# Specifics
| {"golden_diff": "diff --git a/pip/_vendor/requests/__init__.py b/pip/_vendor/requests/__init__.py\n--- a/pip/_vendor/requests/__init__.py\n+++ b/pip/_vendor/requests/__init__.py\n@@ -49,11 +49,13 @@\n __copyright__ = 'Copyright 2016 Kenneth Reitz'\n \n # Attempt to enable urllib3's SNI support, if possible\n-try:\n- from .packages.urllib3.contrib import pyopenssl\n- pyopenssl.inject_into_urllib3()\n-except ImportError:\n- pass\n+# Note: Patched by pip to prevent using the PyOpenSSL module. On Windows this\n+# prevents upgrading cryptography.\n+# try:\n+# from .packages.urllib3.contrib import pyopenssl\n+# pyopenssl.inject_into_urllib3()\n+# except ImportError:\n+# pass\n \n import warnings\n \ndiff --git a/pip/_vendor/requests/compat.py b/pip/_vendor/requests/compat.py\n--- a/pip/_vendor/requests/compat.py\n+++ b/pip/_vendor/requests/compat.py\n@@ -21,12 +21,14 @@\n #: Python 3.x?\n is_py3 = (_ver[0] == 3)\n \n-try:\n- import simplejson as json\n-except (ImportError, SyntaxError):\n- # simplejson does not support Python 3.2, it throws a SyntaxError\n- # because of u'...' Unicode literals.\n- import json\n+# Note: We've patched out simplejson support in pip because it prevents\n+# upgrading simplejson on Windows.\n+# try:\n+# import simplejson as json\n+# except (ImportError, SyntaxError):\n+# # simplejson does not support Python 3.2, it throws a SyntaxError\n+# # because of u'...' Unicode literals.\n+import json\n \n # ---------\n # Specifics\n", "issue": "pip on windows\nPrerequisites:\nWindows\npython 2.7\nsetuptools 3.6\neasy_install\npip\n\nProblem:\nwe have a requirements file that contains \"pyinstaller\" package dependency. I suppose that it somehow depends on cryptography and it in its turn somehow depends on cffi package and that's why it tries to install it as well. For now we specify this depency in a following way:\n\npyinstaller==2.1.1dev-89e99dd # from requirements file.\n\nNext we try to install our pack of requirements with \n\npip -f {our additional private repository} -U -egg -r {requirements file name}\n\nFrom the build log screenshot attached you can see the following string(build_log attachment):\n\n[12:59:39]: [Step 7/15] Installed c:\\users\\administrator\\envs\\python27-32bit\\lib\\site-packages\\cryptography-0.3-py2.7-win32.egg\n[12:59:39]: [Step 7/15] \n[12:59:39]: [Step 7/15] error: c:\\buildagent\\temp\\buildtmp\\easy_install-snsbak\\cryptography-0.3\\cffi-0.8.2-py2.7-win32.egg_cffi_backend.pyd: Access is denied\n\nI've done a few of investigation and attached a screenshot of related file activity with a highlighted important items (file_activity_list attachment). It seems that python cannod delete this _cffi_backend.pyd file. A few more investigation revealed that the file creation mode allows to delete this file(file_open_result attachment). From the event properties(event_properties attachment) I see that all related modules appear as if they are loaded using LoadLibray windows api, that's how it supposed to be done with python C extensions if we want to use this code. But it appears to me that someone forgot to unload all these modules before trying to delete them and that's why the file cannot be deleted. Please refer to _cffi_backend_loading attachment - it proves _cffi_backend.pyd is being loaded as a library(WinDbg screen). And the very next screenshot is the state of pip install command when WinDbg broke on module load - _break_python_screen attachment. 
Yet I pointed out the same problem if I just parse this requirements file with pkg_tools and specify install_requires setup's argument with these requirements - setup develop fails with the same result. As to me this problem more relates to easy_install/setuptools core. And yet to mention - without '--egg' argument the problem doesn't reproduce.\n\n\n\n\n\n\n\n\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# __\n# /__) _ _ _ _ _/ _\n# / ( (- (/ (/ (- _) / _)\n# /\n\n\"\"\"\nRequests HTTP library\n~~~~~~~~~~~~~~~~~~~~~\n\nRequests is an HTTP library, written in Python, for human beings. Basic GET\nusage:\n\n >>> import requests\n >>> r = requests.get('https://www.python.org')\n >>> r.status_code\n 200\n >>> 'Python is a programming language' in r.content\n True\n\n... or POST:\n\n >>> payload = dict(key1='value1', key2='value2')\n >>> r = requests.post('http://httpbin.org/post', data=payload)\n >>> print(r.text)\n {\n ...\n \"form\": {\n \"key2\": \"value2\",\n \"key1\": \"value1\"\n },\n ...\n }\n\nThe other HTTP methods are supported - see `requests.api`. Full documentation\nis at <http://python-requests.org>.\n\n:copyright: (c) 2016 by Kenneth Reitz.\n:license: Apache 2.0, see LICENSE for more details.\n\n\"\"\"\n\n__title__ = 'requests'\n__version__ = '2.10.0'\n__build__ = 0x021000\n__author__ = 'Kenneth Reitz'\n__license__ = 'Apache 2.0'\n__copyright__ = 'Copyright 2016 Kenneth Reitz'\n\n# Attempt to enable urllib3's SNI support, if possible\ntry:\n from .packages.urllib3.contrib import pyopenssl\n pyopenssl.inject_into_urllib3()\nexcept ImportError:\n pass\n\nimport warnings\n\n# urllib3's DependencyWarnings should be silenced.\nfrom .packages.urllib3.exceptions import DependencyWarning\nwarnings.simplefilter('ignore', DependencyWarning)\n\nfrom . import utils\nfrom .models import Request, Response, PreparedRequest\nfrom .api import request, get, head, post, patch, put, delete, options\nfrom .sessions import session, Session\nfrom .status_codes import codes\nfrom .exceptions import (\n RequestException, Timeout, URLRequired,\n TooManyRedirects, HTTPError, ConnectionError,\n FileModeWarning, ConnectTimeout, ReadTimeout\n)\n\n# Set default logging handler to avoid \"No handler found\" warnings.\nimport logging\ntry: # Python 2.7+\n from logging import NullHandler\nexcept ImportError:\n class NullHandler(logging.Handler):\n def emit(self, record):\n pass\n\nlogging.getLogger(__name__).addHandler(NullHandler())\n\nimport warnings\n\n# FileModeWarnings go off per the default.\nwarnings.simplefilter('default', FileModeWarning, append=True)\n", "path": "pip/_vendor/requests/__init__.py"}, {"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\npythoncompat\n\"\"\"\n\nfrom .packages import chardet\n\nimport sys\n\n# -------\n# Pythons\n# -------\n\n# Syntax sugar.\n_ver = sys.version_info\n\n#: Python 2.x?\nis_py2 = (_ver[0] == 2)\n\n#: Python 3.x?\nis_py3 = (_ver[0] == 3)\n\ntry:\n import simplejson as json\nexcept (ImportError, SyntaxError):\n # simplejson does not support Python 3.2, it throws a SyntaxError\n # because of u'...' 
Unicode literals.\n import json\n\n# ---------\n# Specifics\n# ---------\n\nif is_py2:\n from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass\n from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag\n from urllib2 import parse_http_list\n import cookielib\n from Cookie import Morsel\n from StringIO import StringIO\n from .packages.urllib3.packages.ordered_dict import OrderedDict\n\n builtin_str = str\n bytes = str\n str = unicode\n basestring = basestring\n numeric_types = (int, long, float)\n\nelif is_py3:\n from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag\n from urllib.request import parse_http_list, getproxies, proxy_bypass\n from http import cookiejar as cookielib\n from http.cookies import Morsel\n from io import StringIO\n from collections import OrderedDict\n\n builtin_str = str\n str = str\n bytes = bytes\n basestring = (str, bytes)\n numeric_types = (int, float)\n", "path": "pip/_vendor/requests/compat.py"}]} | 2,785 | 417 |
gh_patches_debug_24280 | rasdani/github-patches | git_diff | zigpy__zha-device-handlers-1098 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Device Support Request] Legrand Dimmer switch w/o neutral for sw_build_id=002e
Hi, guys!
I have a Legrand Dimmer switch w/o neutral which is recognized as neither DimmerWithoutNeutral nor DimmerWithoutNeutral2.
The device runs this firmware:
```
hw_version = 7
stack_version = 66
sw_build_id = 002e
```
In this firmware Legrand changed the device type for endpoint=242 from 0x0061 to 0x0066, and they also added a few more clusters:
```
endpoint=1: out: 0x0006, 0x0005
endpoint=242: in: 0x0021
```
Here is a complete device signature
```
{
"node_descriptor": "<NodeDescriptor byte1=17 byte2=64 mac_capability_flags=142 manufacturer_code=4129 maximum_buffer_size=89 maximum_incoming_transfer_size=63 server_mask=10752 maximum_outgoing_transfer_size=63 descriptor_capability_field=0>",
"endpoints": {
"1": {
"profile_id": 260,
"device_type": "0x0100",
"in_clusters": [
"0x0000",
"0x0003",
"0x0004",
"0x0005",
"0x0006",
"0x0008",
"0x000f",
"0xfc01"
],
"out_clusters": [
"0x0000",
"0x0019",
"0xfc01"
]
}
},
"manufacturer": " Legrand",
"model": " Dimmer switch w/o neutral",
"class": "zigpy.device.Device"
}
```
I've updated the definition of DimmerWithoutNeutral2 (see below) and now the device is properly recognized.
```
class DimmerWithoutNeutral2(DimmerWithoutNeutral):
"""Dimmer switch w/o neutral 2."""
signature = {
# <SimpleDescriptor endpoint=1 profile=260 device_type=256
# device_version=1
# input_clusters=[0, 3, 4, 8, 6, 5, 15, 64513]
# output_clusters=[0, 64513, 25]>
MODELS_INFO: [(f" {LEGRAND}", " Dimmer switch w/o neutral")],
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Scenes.cluster_id,
BinaryInput.cluster_id,
MANUFACTURER_SPECIFIC_CLUSTER_ID,
],
OUTPUT_CLUSTERS: [
Basic.cluster_id,
MANUFACTURER_SPECIFIC_CLUSTER_ID,
Ota.cluster_id,
OnOff.cluster_id,
Scenes.cluster_id,
],
},
242: {
PROFILE_ID: 41440,
DEVICE_TYPE: 0x0066,
INPUT_CLUSTERS: [0x0021],
OUTPUT_CLUSTERS: [0x0021],
},
},
}
```
Please add this quirk to master.
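For reference, the change that landed (shown in the patch at the end of this record) keeps the existing classes untouched and adds the 002e firmware as a separate subclass - quirk signatures have to match the reporting device, so widening `DimmerWithoutNeutral2` in place would risk no longer matching devices on the older firmware. The essence, abridged (the full class is in the patch below):

```python
# Abridged sketch of the new quirk subclass for the 002e firmware.
class DimmerWithoutNeutral3(DimmerWithoutNeutral):
    """Dimmer switch w/o neutral (002e firmware)."""

    signature = {
        MODELS_INFO: [(f" {LEGRAND}", " Dimmer switch w/o neutral")],
        ENDPOINTS: {
            # endpoint 1: same input clusters as above, plus OnOff and Scenes
            # (0x0006, 0x0005) added to the output clusters
            242: {
                PROFILE_ID: 41440,         # Green Power profile
                DEVICE_TYPE: 0x0066,       # was 0x0061 on older firmware
                INPUT_CLUSTERS: [0x0021],  # GreenPowerProxy now also an input cluster
                OUTPUT_CLUSTERS: [0x0021],
            },
        },
    }
```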
</issue>
<code>
[start of zhaquirks/legrand/dimmer.py]
1 """Device handler for Legrand Dimmer switch w/o neutral."""
2 from zigpy.profiles import zha
3 from zigpy.quirks import CustomCluster, CustomDevice
4 import zigpy.types as t
5 from zigpy.zcl.clusters.general import (
6 Basic,
7 BinaryInput,
8 Groups,
9 Identify,
10 LevelControl,
11 OnOff,
12 Ota,
13 Scenes,
14 )
15 from zigpy.zcl.clusters.manufacturer_specific import ManufacturerSpecificCluster
16
17 from zhaquirks.const import (
18 DEVICE_TYPE,
19 ENDPOINTS,
20 INPUT_CLUSTERS,
21 MODELS_INFO,
22 OUTPUT_CLUSTERS,
23 PROFILE_ID,
24 )
25 from zhaquirks.legrand import LEGRAND
26
27 MANUFACTURER_SPECIFIC_CLUSTER_ID = 0xFC01 # decimal = 64513
28
29
30 class LegrandCluster(CustomCluster, ManufacturerSpecificCluster):
31 """LegrandCluster."""
32
33 cluster_id = MANUFACTURER_SPECIFIC_CLUSTER_ID
34 name = "LegrandCluster"
35 ep_attribute = "legrand_cluster"
36 manufacturer_attributes = {
37 0x0000: ("dimmer", t.data16),
38 0x0001: ("led_dark", t.Bool),
39 0x0002: ("led_on", t.Bool),
40 }
41
42
43 class DimmerWithoutNeutral(CustomDevice):
44 """Dimmer switch w/o neutral."""
45
46 signature = {
47 # <SimpleDescriptor endpoint=1 profile=260 device_type=256
48 # device_version=1
49 # input_clusters=[0, 3, 4, 8, 6, 5, 15, 64513]
50 # output_clusters=[0, 64513, 25]>
51 MODELS_INFO: [(f" {LEGRAND}", " Dimmer switch w/o neutral")],
52 ENDPOINTS: {
53 1: {
54 PROFILE_ID: zha.PROFILE_ID,
55 DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,
56 INPUT_CLUSTERS: [
57 Basic.cluster_id,
58 Identify.cluster_id,
59 Groups.cluster_id,
60 OnOff.cluster_id,
61 LevelControl.cluster_id,
62 Scenes.cluster_id,
63 BinaryInput.cluster_id,
64 MANUFACTURER_SPECIFIC_CLUSTER_ID,
65 ],
66 OUTPUT_CLUSTERS: [
67 Basic.cluster_id,
68 MANUFACTURER_SPECIFIC_CLUSTER_ID,
69 Ota.cluster_id,
70 ],
71 }
72 },
73 }
74
75 replacement = {
76 ENDPOINTS: {
77 1: {
78 PROFILE_ID: zha.PROFILE_ID,
79 DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,
80 INPUT_CLUSTERS: [
81 Basic.cluster_id,
82 Identify.cluster_id,
83 Groups.cluster_id,
84 OnOff.cluster_id,
85 LevelControl.cluster_id,
86 Scenes.cluster_id,
87 BinaryInput.cluster_id,
88 LegrandCluster,
89 ],
90 OUTPUT_CLUSTERS: [Basic.cluster_id, LegrandCluster, Ota.cluster_id],
91 }
92 }
93 }
94
95
96 class DimmerWithoutNeutral2(DimmerWithoutNeutral):
97 """Dimmer switch w/o neutral 2."""
98
99 signature = {
100 # <SimpleDescriptor endpoint=1 profile=260 device_type=256
101 # device_version=1
102 # input_clusters=[0, 3, 4, 8, 6, 5, 15, 64513]
103 # output_clusters=[0, 64513, 25]>
104 MODELS_INFO: [(f" {LEGRAND}", " Dimmer switch w/o neutral")],
105 ENDPOINTS: {
106 1: {
107 PROFILE_ID: zha.PROFILE_ID,
108 DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,
109 INPUT_CLUSTERS: [
110 Basic.cluster_id,
111 Identify.cluster_id,
112 Groups.cluster_id,
113 OnOff.cluster_id,
114 LevelControl.cluster_id,
115 Scenes.cluster_id,
116 BinaryInput.cluster_id,
117 MANUFACTURER_SPECIFIC_CLUSTER_ID,
118 ],
119 OUTPUT_CLUSTERS: [
120 Basic.cluster_id,
121 MANUFACTURER_SPECIFIC_CLUSTER_ID,
122 Ota.cluster_id,
123 ],
124 },
125 242: {
126 PROFILE_ID: 41440,
127 DEVICE_TYPE: 0x0061,
128 INPUT_CLUSTERS: [],
129 OUTPUT_CLUSTERS: [0x0021],
130 },
131 },
132 }
133
134
135 class DimmerWithNeutral(DimmerWithoutNeutral):
136 """Dimmer switch with neutral."""
137
138 signature = {
139 # <SimpleDescriptor endpoint=1 profile=260 device_type=256
140 # device_version=1
141 # input_clusters=[0, 3, 4, 8, 6, 5, 15, 64513]
142 # output_clusters=[0, 25, 64513]>
143 MODELS_INFO: [(f" {LEGRAND}", " Dimmer switch with neutral")],
144 ENDPOINTS: {
145 1: {
146 PROFILE_ID: zha.PROFILE_ID,
147 DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,
148 INPUT_CLUSTERS: [
149 Basic.cluster_id,
150 Identify.cluster_id,
151 Groups.cluster_id,
152 OnOff.cluster_id,
153 LevelControl.cluster_id,
154 Scenes.cluster_id,
155 BinaryInput.cluster_id,
156 MANUFACTURER_SPECIFIC_CLUSTER_ID,
157 ],
158 OUTPUT_CLUSTERS: [
159 Basic.cluster_id,
160 MANUFACTURER_SPECIFIC_CLUSTER_ID,
161 Ota.cluster_id,
162 ],
163 },
164 242: {
165 PROFILE_ID: 41440,
166 DEVICE_TYPE: 0x0066,
167 INPUT_CLUSTERS: [0x0021],
168 OUTPUT_CLUSTERS: [0x0021],
169 },
170 },
171 }
172
[end of zhaquirks/legrand/dimmer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zhaquirks/legrand/dimmer.py b/zhaquirks/legrand/dimmer.py
--- a/zhaquirks/legrand/dimmer.py
+++ b/zhaquirks/legrand/dimmer.py
@@ -132,6 +132,47 @@
}
+class DimmerWithoutNeutral3(DimmerWithoutNeutral):
+ """Dimmer switch w/o neutral (at least for firmware 0x2e3)."""
+
+ signature = {
+ # <SimpleDescriptor endpoint=1 profile=260 device_type=256
+ # device_version=1
+ # input_clusters=[0, 3, 4, 5, 6, 8, 15, 64513]
+ # output_clusters=[0, 5, 6, 25, 64513]>
+ MODELS_INFO: [(f" {LEGRAND}", " Dimmer switch w/o neutral")],
+ ENDPOINTS: {
+ 1: {
+ PROFILE_ID: zha.PROFILE_ID,
+ DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,
+ INPUT_CLUSTERS: [
+ Basic.cluster_id,
+ Identify.cluster_id,
+ Groups.cluster_id,
+ OnOff.cluster_id,
+ LevelControl.cluster_id,
+ Scenes.cluster_id,
+ BinaryInput.cluster_id,
+ MANUFACTURER_SPECIFIC_CLUSTER_ID,
+ ],
+ OUTPUT_CLUSTERS: [
+ Basic.cluster_id,
+ MANUFACTURER_SPECIFIC_CLUSTER_ID,
+ Ota.cluster_id,
+ OnOff.cluster_id,
+ Scenes.cluster_id,
+ ],
+ },
+ 242: {
+ PROFILE_ID: 41440,
+ DEVICE_TYPE: 0x0066,
+ INPUT_CLUSTERS: [0x0021],
+ OUTPUT_CLUSTERS: [0x0021],
+ },
+ },
+ }
+
+
class DimmerWithNeutral(DimmerWithoutNeutral):
"""Dimmer switch with neutral."""
| {"golden_diff": "diff --git a/zhaquirks/legrand/dimmer.py b/zhaquirks/legrand/dimmer.py\n--- a/zhaquirks/legrand/dimmer.py\n+++ b/zhaquirks/legrand/dimmer.py\n@@ -132,6 +132,47 @@\n }\n \n \n+class DimmerWithoutNeutral3(DimmerWithoutNeutral):\n+ \"\"\"Dimmer switch w/o neutral (at least for firmware 0x2e3).\"\"\"\n+\n+ signature = {\n+ # <SimpleDescriptor endpoint=1 profile=260 device_type=256\n+ # device_version=1\n+ # input_clusters=[0, 3, 4, 5, 6, 8, 15, 64513]\n+ # output_clusters=[0, 5, 6, 25, 64513]>\n+ MODELS_INFO: [(f\" {LEGRAND}\", \" Dimmer switch w/o neutral\")],\n+ ENDPOINTS: {\n+ 1: {\n+ PROFILE_ID: zha.PROFILE_ID,\n+ DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,\n+ INPUT_CLUSTERS: [\n+ Basic.cluster_id,\n+ Identify.cluster_id,\n+ Groups.cluster_id,\n+ OnOff.cluster_id,\n+ LevelControl.cluster_id,\n+ Scenes.cluster_id,\n+ BinaryInput.cluster_id,\n+ MANUFACTURER_SPECIFIC_CLUSTER_ID,\n+ ],\n+ OUTPUT_CLUSTERS: [\n+ Basic.cluster_id,\n+ MANUFACTURER_SPECIFIC_CLUSTER_ID,\n+ Ota.cluster_id,\n+ OnOff.cluster_id,\n+ Scenes.cluster_id,\n+ ],\n+ },\n+ 242: {\n+ PROFILE_ID: 41440,\n+ DEVICE_TYPE: 0x0066,\n+ INPUT_CLUSTERS: [0x0021],\n+ OUTPUT_CLUSTERS: [0x0021],\n+ },\n+ },\n+ }\n+\n+\n class DimmerWithNeutral(DimmerWithoutNeutral):\n \"\"\"Dimmer switch with neutral.\"\"\"\n", "issue": "[Device Support Request] Legrand Dimmer switch w/o neutral for sw_build_id=002e\nHi, guy!\r\n\r\nI have Legrand Dimmer switch w/o neutral which is not recognized neither as DimmerWithoutNeutral nor as DimmerWithoutNeutral2.\r\n\r\nThe device runs this firmware:\r\n```\r\nhw_version = 7\r\nstack_version = 66\r\nsw_build_id = 002e\r\n```\r\n\r\nIn this firmware Legrand changed the device type for endpoint=242 from 0x0061 to 0x0066, and they also added a few more clusters:\r\n```\r\nendpoint=1: out: 0x0006, 0x0005\r\nendpoint=242: in: 0x0021\r\n```\r\n\r\nHere is a complete device signature\r\n```\r\n{\r\n \"node_descriptor\": \"<NodeDescriptor byte1=17 byte2=64 mac_capability_flags=142 manufacturer_code=4129 maximum_buffer_size=89 maximum_incoming_transfer_size=63 server_mask=10752 maximum_outgoing_transfer_size=63 descriptor_capability_field=0>\",\r\n \"endpoints\": {\r\n \"1\": {\r\n \"profile_id\": 260,\r\n \"device_type\": \"0x0100\",\r\n \"in_clusters\": [\r\n \"0x0000\",\r\n \"0x0003\",\r\n \"0x0004\",\r\n \"0x0005\",\r\n \"0x0006\",\r\n \"0x0008\",\r\n \"0x000f\",\r\n \"0xfc01\"\r\n ],\r\n \"out_clusters\": [\r\n \"0x0000\",\r\n \"0x0019\",\r\n \"0xfc01\"\r\n ]\r\n }\r\n },\r\n \"manufacturer\": \" Legrand\",\r\n \"model\": \" Dimmer switch w/o neutral\",\r\n \"class\": \"zigpy.device.Device\"\r\n}\r\n```\r\n\r\nI've updated the definition of DimmerWithoutNeutral2 (see below) and now the device is properly recognized.\r\n\r\n```\r\nclass DimmerWithoutNeutral2(DimmerWithoutNeutral):\r\n \"\"\"Dimmer switch w/o neutral 2.\"\"\"\r\n\r\n signature = {\r\n # <SimpleDescriptor endpoint=1 profile=260 device_type=256\r\n # device_version=1\r\n # input_clusters=[0, 3, 4, 8, 6, 5, 15, 64513]\r\n # output_clusters=[0, 64513, 25]>\r\n MODELS_INFO: [(f\" {LEGRAND}\", \" Dimmer switch w/o neutral\")],\r\n ENDPOINTS: {\r\n 1: {\r\n PROFILE_ID: zha.PROFILE_ID,\r\n DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,\r\n INPUT_CLUSTERS: [\r\n Basic.cluster_id,\r\n Identify.cluster_id,\r\n Groups.cluster_id,\r\n OnOff.cluster_id,\r\n LevelControl.cluster_id,\r\n Scenes.cluster_id,\r\n BinaryInput.cluster_id,\r\n MANUFACTURER_SPECIFIC_CLUSTER_ID,\r\n ],\r\n OUTPUT_CLUSTERS: [\r\n Basic.cluster_id,\r\n 
MANUFACTURER_SPECIFIC_CLUSTER_ID,\r\n Ota.cluster_id,\r\n OnOff.cluster_id,\r\n Scenes.cluster_id,\r\n ],\r\n },\r\n 242: {\r\n PROFILE_ID: 41440,\r\n DEVICE_TYPE: 0x0066,\r\n INPUT_CLUSTERS: [0x0021],\r\n OUTPUT_CLUSTERS: [0x0021],\r\n },\r\n },\r\n }\r\n```\r\n\r\nPlease add this quirk to master. \n", "before_files": [{"content": "\"\"\"Device handler for Legrand Dimmer switch w/o neutral.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomCluster, CustomDevice\nimport zigpy.types as t\nfrom zigpy.zcl.clusters.general import (\n Basic,\n BinaryInput,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n Scenes,\n)\nfrom zigpy.zcl.clusters.manufacturer_specific import ManufacturerSpecificCluster\n\nfrom zhaquirks.const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\nfrom zhaquirks.legrand import LEGRAND\n\nMANUFACTURER_SPECIFIC_CLUSTER_ID = 0xFC01 # decimal = 64513\n\n\nclass LegrandCluster(CustomCluster, ManufacturerSpecificCluster):\n \"\"\"LegrandCluster.\"\"\"\n\n cluster_id = MANUFACTURER_SPECIFIC_CLUSTER_ID\n name = \"LegrandCluster\"\n ep_attribute = \"legrand_cluster\"\n manufacturer_attributes = {\n 0x0000: (\"dimmer\", t.data16),\n 0x0001: (\"led_dark\", t.Bool),\n 0x0002: (\"led_on\", t.Bool),\n }\n\n\nclass DimmerWithoutNeutral(CustomDevice):\n \"\"\"Dimmer switch w/o neutral.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=256\n # device_version=1\n # input_clusters=[0, 3, 4, 8, 6, 5, 15, 64513]\n # output_clusters=[0, 64513, 25]>\n MODELS_INFO: [(f\" {LEGRAND}\", \" Dimmer switch w/o neutral\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Scenes.cluster_id,\n BinaryInput.cluster_id,\n MANUFACTURER_SPECIFIC_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [\n Basic.cluster_id,\n MANUFACTURER_SPECIFIC_CLUSTER_ID,\n Ota.cluster_id,\n ],\n }\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Scenes.cluster_id,\n BinaryInput.cluster_id,\n LegrandCluster,\n ],\n OUTPUT_CLUSTERS: [Basic.cluster_id, LegrandCluster, Ota.cluster_id],\n }\n }\n }\n\n\nclass DimmerWithoutNeutral2(DimmerWithoutNeutral):\n \"\"\"Dimmer switch w/o neutral 2.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=256\n # device_version=1\n # input_clusters=[0, 3, 4, 8, 6, 5, 15, 64513]\n # output_clusters=[0, 64513, 25]>\n MODELS_INFO: [(f\" {LEGRAND}\", \" Dimmer switch w/o neutral\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Scenes.cluster_id,\n BinaryInput.cluster_id,\n MANUFACTURER_SPECIFIC_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [\n Basic.cluster_id,\n MANUFACTURER_SPECIFIC_CLUSTER_ID,\n Ota.cluster_id,\n ],\n },\n 242: {\n PROFILE_ID: 41440,\n DEVICE_TYPE: 0x0061,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [0x0021],\n },\n },\n }\n\n\nclass DimmerWithNeutral(DimmerWithoutNeutral):\n \"\"\"Dimmer switch with neutral.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=256\n # 
device_version=1\n # input_clusters=[0, 3, 4, 8, 6, 5, 15, 64513]\n # output_clusters=[0, 25, 64513]>\n MODELS_INFO: [(f\" {LEGRAND}\", \" Dimmer switch with neutral\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Scenes.cluster_id,\n BinaryInput.cluster_id,\n MANUFACTURER_SPECIFIC_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [\n Basic.cluster_id,\n MANUFACTURER_SPECIFIC_CLUSTER_ID,\n Ota.cluster_id,\n ],\n },\n 242: {\n PROFILE_ID: 41440,\n DEVICE_TYPE: 0x0066,\n INPUT_CLUSTERS: [0x0021],\n OUTPUT_CLUSTERS: [0x0021],\n },\n },\n }\n", "path": "zhaquirks/legrand/dimmer.py"}]} | 3,012 | 468 |
gh_patches_debug_59429 | rasdani/github-patches | git_diff | Textualize__rich-3105 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] `font-family` ignored in `html_export` due to user agent stylesheet for `<code>`
- [X] I've checked [docs](https://rich.readthedocs.io/en/latest/introduction.html) and [closed issues](https://github.com/Textualize/rich/issues?q=is%3Aissue+is%3Aclosed) for possible solutions.
- [X] I can't find my issue in the [FAQ](https://github.com/Textualize/rich/blob/master/FAQ.md).
**Describe the bug**
Run this code:
```py
import rich.console
try:
test = 1
raise Exception()
except Exception:
console = rich.console.Console(record=True)
console.print_exception(show_locals=True)
html = console.export_html(inline_styles=True)
with open("test.html", "w") as html_file:
html_file.write(html)
```
You will get a `test.html` output file. Open it in Chrome.
I'm on macOS, and it shows up like this:

Notice the lines are not aligned properly on the right side. Here is why:

As you can see, Chrome's user agent stylesheet causes the `<code>` element to reset the `font-family` on the `<pre>` element back to `monospace`. All we need is to have Rich add a `font-family: inherit;` on the `<code>` element and everything is fine:

**Platform**
<details>
<summary>Click to expand</summary>
What platform (Win/Linux/Mac) are you running on? What terminal software are you using?
Mac with Chrome
```
❯ python -m rich.diagnose
╭───────────────────────── <class 'rich.console.Console'> ─────────────────────────╮
│ A high level console interface. │
│ │
│ ╭──────────────────────────────────────────────────────────────────────────────╮ │
│ │ <console width=148 ColorSystem.TRUECOLOR> │ │
│ ╰──────────────────────────────────────────────────────────────────────────────╯ │
│ │
│ color_system = 'truecolor' │
│ encoding = 'utf-8' │
│ file = <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'> │
│ height = 87 │
│ is_alt_screen = False │
│ is_dumb_terminal = False │
│ is_interactive = True │
│ is_jupyter = False │
│ is_terminal = True │
│ legacy_windows = False │
│ no_color = False │
│ options = ConsoleOptions( │
│ size=ConsoleDimensions(width=148, height=87), │
│ legacy_windows=False, │
│ min_width=1, │
│ max_width=148, │
│ is_terminal=True, │
│ encoding='utf-8', │
│ max_height=87, │
│ justify=None, │
│ overflow=None, │
│ no_wrap=False, │
│ highlight=None, │
│ markup=None, │
│ height=None │
│ ) │
│ quiet = False │
│ record = False │
│ safe_box = True │
│ size = ConsoleDimensions(width=148, height=87) │
│ soft_wrap = False │
│ stderr = False │
│ style = None │
│ tab_size = 8 │
│ width = 148 │
╰──────────────────────────────────────────────────────────────────────────────────╯
╭─── <class 'rich._windows.WindowsConsoleFeatures'> ────╮
│ Windows features available. │
│ │
│ ╭───────────────────────────────────────────────────╮ │
│ │ WindowsConsoleFeatures(vt=False, truecolor=False) │ │
│ ╰───────────────────────────────────────────────────╯ │
│ │
│ truecolor = False │
│ vt = False │
╰───────────────────────────────────────────────────────╯
╭────── Environment Variables ───────╮
│ { │
│ 'TERM': 'xterm-256color', │
│ 'COLORTERM': 'truecolor', │
│ 'CLICOLOR': None, │
│ 'NO_COLOR': None, │
│ 'TERM_PROGRAM': 'vscode', │
│ 'COLUMNS': None, │
│ 'LINES': None, │
│ 'JUPYTER_COLUMNS': None, │
│ 'JUPYTER_LINES': None, │
│ 'JPY_PARENT_PID': None, │
│ 'VSCODE_VERBOSE_LOGGING': None │
│ } │
╰────────────────────────────────────╯
platform="Darwin"
❯ python -m pip freeze | grep rich
rich==13.4.2
```
</details>
</issue>
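
The fix the issue points at is purely a template change: the exported `<code>` element needs an inline `font-family: inherit` so the browser's user-agent stylesheet cannot push it back to `monospace`. The sketch below is a simplified stand-in for the export template, not Rich's actual constant (the real one lives in `rich/_export_format.py` and also carries stylesheet and color placeholders); it only demonstrates the inherit trick.

```python
# Simplified stand-in for the HTML export template; not Rich's actual constant.
SIMPLIFIED_HTML_FORMAT = """\
<!DOCTYPE html>
<html>
<body>
    <pre style="font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><code style="font-family:inherit">{code}</code></pre>
</body>
</html>
"""


def render_html(code: str) -> str:
    """Fill the template; <code> now inherits the <pre> font instead of the
    user-agent 'monospace' default, so glyph widths stay consistent."""
    return SIMPLIFIED_HTML_FORMAT.format(code=code)


print(render_html("print('hello')"))
```
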
<code>
[start of rich/_export_format.py]
1 CONSOLE_HTML_FORMAT = """\
2 <!DOCTYPE html>
3 <html>
4 <head>
5 <meta charset="UTF-8">
6 <style>
7 {stylesheet}
8 body {{
9 color: {foreground};
10 background-color: {background};
11 }}
12 </style>
13 </head>
14 <body>
15 <pre style="font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><code>{code}</code></pre>
16 </body>
17 </html>
18 """
19
20 CONSOLE_SVG_FORMAT = """\
21 <svg class="rich-terminal" viewBox="0 0 {width} {height}" xmlns="http://www.w3.org/2000/svg">
22 <!-- Generated with Rich https://www.textualize.io -->
23 <style>
24
25 @font-face {{
26 font-family: "Fira Code";
27 src: local("FiraCode-Regular"),
28 url("https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff2/FiraCode-Regular.woff2") format("woff2"),
29 url("https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff/FiraCode-Regular.woff") format("woff");
30 font-style: normal;
31 font-weight: 400;
32 }}
33 @font-face {{
34 font-family: "Fira Code";
35 src: local("FiraCode-Bold"),
36 url("https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff2/FiraCode-Bold.woff2") format("woff2"),
37 url("https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff/FiraCode-Bold.woff") format("woff");
38 font-style: bold;
39 font-weight: 700;
40 }}
41
42 .{unique_id}-matrix {{
43 font-family: Fira Code, monospace;
44 font-size: {char_height}px;
45 line-height: {line_height}px;
46 font-variant-east-asian: full-width;
47 }}
48
49 .{unique_id}-title {{
50 font-size: 18px;
51 font-weight: bold;
52 font-family: arial;
53 }}
54
55 {styles}
56 </style>
57
58 <defs>
59 <clipPath id="{unique_id}-clip-terminal">
60 <rect x="0" y="0" width="{terminal_width}" height="{terminal_height}" />
61 </clipPath>
62 {lines}
63 </defs>
64
65 {chrome}
66 <g transform="translate({terminal_x}, {terminal_y})" clip-path="url(#{unique_id}-clip-terminal)">
67 {backgrounds}
68 <g class="{unique_id}-matrix">
69 {matrix}
70 </g>
71 </g>
72 </svg>
73 """
74
75 _SVG_FONT_FAMILY = "Rich Fira Code"
76 _SVG_CLASSES_PREFIX = "rich-svg"
77
[end of rich/_export_format.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rich/_export_format.py b/rich/_export_format.py
--- a/rich/_export_format.py
+++ b/rich/_export_format.py
@@ -12,7 +12,7 @@
</style>
</head>
<body>
- <pre style="font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><code>{code}</code></pre>
+ <pre style="font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><code style="font-family:inherit">{code}</code></pre>
</body>
</html>
"""
| {"golden_diff": "diff --git a/rich/_export_format.py b/rich/_export_format.py\n--- a/rich/_export_format.py\n+++ b/rich/_export_format.py\n@@ -12,7 +12,7 @@\n </style>\n </head>\n <body>\n- <pre style=\"font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><code>{code}</code></pre>\n+ <pre style=\"font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><code style=\"font-family:inherit\">{code}</code></pre>\n </body>\n </html>\n \"\"\"\n", "issue": "[BUG] `font-family` ignored in `html_export` due to user agent stylesheet for `<code>`\n- [X] I've checked [docs](https://rich.readthedocs.io/en/latest/introduction.html) and [closed issues](https://github.com/Textualize/rich/issues?q=is%3Aissue+is%3Aclosed) for possible solutions.\r\n- [X] I can't find my issue in the [FAQ](https://github.com/Textualize/rich/blob/master/FAQ.md).\r\n\r\n**Describe the bug**\r\n\r\nRun this code:\r\n\r\n```py\r\nimport rich.console\r\n\r\ntry:\r\n test = 1\r\n raise Exception()\r\nexcept Exception:\r\n console = rich.console.Console(record=True)\r\n console.print_exception(show_locals=True)\r\n html = console.export_html(inline_styles=True)\r\n with open(\"test.html\", \"w\") as html_file:\r\n html_file.write(html)\r\n```\r\n\r\nYou will get an `test.html` output file. Open it in Chrome.\r\n\r\nI'm on macOS, and it shows up like this:\r\n\r\n\r\n\r\n\r\nNotice the lines are not aligned properly on the right side. Here is why:\r\n\r\n\r\n\r\nAs you can see, Chrome's user agent stylesheet causes the `<code>` element to reset the `font-family` on the `<pre>` element back to `monospace`. All we need is to have Rich add a `font-family: inherit;` on the `<code>` element and everything is fine:\r\n\r\n\r\n\r\n**Platform**\r\n<details>\r\n<summary>Click to expand</summary>\r\n\r\nWhat platform (Win/Linux/Mac) are you running on? What terminal software are you using?\r\nMac with Chrome\r\n\r\n```\r\n\u276f python -m rich.diagnose\r\n\u256d\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 <class 'rich.console.Console'> \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e\r\n\u2502 A high level console interface. 
\u2502\r\n\u2502 \u2502\r\n\u2502 \u256d\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e \u2502\r\n\u2502 \u2502 <console width=148 ColorSystem.TRUECOLOR> \u2502 \u2502\r\n\u2502 \u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f \u2502\r\n\u2502 \u2502\r\n\u2502 color_system = 'truecolor' \u2502\r\n\u2502 encoding = 'utf-8' \u2502\r\n\u2502 file = <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'> \u2502\r\n\u2502 height = 87 \u2502\r\n\u2502 is_alt_screen = False \u2502\r\n\u2502 is_dumb_terminal = False \u2502\r\n\u2502 is_interactive = True \u2502\r\n\u2502 is_jupyter = False \u2502\r\n\u2502 is_terminal = True \u2502\r\n\u2502 legacy_windows = False \u2502\r\n\u2502 no_color = False \u2502\r\n\u2502 options = ConsoleOptions( \u2502\r\n\u2502 size=ConsoleDimensions(width=148, height=87), \u2502\r\n\u2502 legacy_windows=False, \u2502\r\n\u2502 min_width=1, \u2502\r\n\u2502 max_width=148, \u2502\r\n\u2502 is_terminal=True, \u2502\r\n\u2502 encoding='utf-8', \u2502\r\n\u2502 max_height=87, \u2502\r\n\u2502 justify=None, \u2502\r\n\u2502 overflow=None, \u2502\r\n\u2502 no_wrap=False, \u2502\r\n\u2502 highlight=None, \u2502\r\n\u2502 markup=None, \u2502\r\n\u2502 height=None \u2502\r\n\u2502 ) \u2502\r\n\u2502 quiet = False \u2502\r\n\u2502 record = False \u2502\r\n\u2502 safe_box = True \u2502\r\n\u2502 size = ConsoleDimensions(width=148, height=87) \u2502\r\n\u2502 soft_wrap = False \u2502\r\n\u2502 stderr = False \u2502\r\n\u2502 style = None \u2502\r\n\u2502 tab_size = 8 \u2502\r\n\u2502 width = 148 \u2502\r\n\u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f\r\n\u256d\u2500\u2500\u2500 <class 'rich._windows.WindowsConsoleFeatures'> \u2500\u2500\u2500\u2500\u256e\r\n\u2502 Windows features available. 
\u2502\r\n\u2502 \u2502\r\n\u2502 \u256d\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e \u2502\r\n\u2502 \u2502 WindowsConsoleFeatures(vt=False, truecolor=False) \u2502 \u2502\r\n\u2502 \u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f \u2502\r\n\u2502 \u2502\r\n\u2502 truecolor = False \u2502\r\n\u2502 vt = False \u2502\r\n\u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f\r\n\u256d\u2500\u2500\u2500\u2500\u2500\u2500 Environment Variables \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e\r\n\u2502 { \u2502\r\n\u2502 'TERM': 'xterm-256color', \u2502\r\n\u2502 'COLORTERM': 'truecolor', \u2502\r\n\u2502 'CLICOLOR': None, \u2502\r\n\u2502 'NO_COLOR': None, \u2502\r\n\u2502 'TERM_PROGRAM': 'vscode', \u2502\r\n\u2502 'COLUMNS': None, \u2502\r\n\u2502 'LINES': None, \u2502\r\n\u2502 'JUPYTER_COLUMNS': None, \u2502\r\n\u2502 'JUPYTER_LINES': None, \u2502\r\n\u2502 'JPY_PARENT_PID': None, \u2502\r\n\u2502 'VSCODE_VERBOSE_LOGGING': None \u2502\r\n\u2502 } \u2502\r\n\u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f\r\nplatform=\"Darwin\"\r\n\r\n\u276f python -m pip freeze | grep rich\r\nrich==13.4.2\r\n```\r\n\r\n</details>\r\n\n", "before_files": [{"content": "CONSOLE_HTML_FORMAT = \"\"\"\\\n<!DOCTYPE html>\n<html>\n<head>\n<meta charset=\"UTF-8\">\n<style>\n{stylesheet}\nbody {{\n color: {foreground};\n background-color: {background};\n}}\n</style>\n</head>\n<body>\n <pre style=\"font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><code>{code}</code></pre>\n</body>\n</html>\n\"\"\"\n\nCONSOLE_SVG_FORMAT = \"\"\"\\\n<svg class=\"rich-terminal\" viewBox=\"0 0 {width} {height}\" xmlns=\"http://www.w3.org/2000/svg\">\n <!-- Generated with Rich https://www.textualize.io -->\n <style>\n\n @font-face {{\n font-family: \"Fira Code\";\n src: local(\"FiraCode-Regular\"),\n url(\"https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff2/FiraCode-Regular.woff2\") format(\"woff2\"),\n url(\"https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff/FiraCode-Regular.woff\") format(\"woff\");\n font-style: normal;\n font-weight: 400;\n }}\n @font-face {{\n font-family: \"Fira Code\";\n src: local(\"FiraCode-Bold\"),\n url(\"https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff2/FiraCode-Bold.woff2\") format(\"woff2\"),\n url(\"https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff/FiraCode-Bold.woff\") format(\"woff\");\n font-style: bold;\n font-weight: 700;\n }}\n\n .{unique_id}-matrix {{\n font-family: Fira Code, monospace;\n font-size: {char_height}px;\n line-height: {line_height}px;\n 
font-variant-east-asian: full-width;\n }}\n\n .{unique_id}-title {{\n font-size: 18px;\n font-weight: bold;\n font-family: arial;\n }}\n\n {styles}\n </style>\n\n <defs>\n <clipPath id=\"{unique_id}-clip-terminal\">\n <rect x=\"0\" y=\"0\" width=\"{terminal_width}\" height=\"{terminal_height}\" />\n </clipPath>\n {lines}\n </defs>\n\n {chrome}\n <g transform=\"translate({terminal_x}, {terminal_y})\" clip-path=\"url(#{unique_id}-clip-terminal)\">\n {backgrounds}\n <g class=\"{unique_id}-matrix\">\n {matrix}\n </g>\n </g>\n</svg>\n\"\"\"\n\n_SVG_FONT_FAMILY = \"Rich Fira Code\"\n_SVG_CLASSES_PREFIX = \"rich-svg\"\n", "path": "rich/_export_format.py"}]} | 2,590 | 140 |
gh_patches_debug_13093 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1363 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cache + virtualenv>=20 / python_venv + moving executables -> File not found: ... python
failure mode looks something like this:
```
Check for added large files..............................................Failed
- hook id: check-added-large-files
- exit code: 1
Executable `/Users/runner/.cache/pre-commit/repo14qw_y0i/py_env-python3.8/bin/python` not found
```
currently this is a common failure for github actions caches; there are ~2 ways to work around this:
1. [add `$(which python)`](https://github.com/pre-commit/action/commit/ee269b64a608de770696d23079f46238c2f7ab5a) to the pre-commit cache key
2. [manually bump](https://github.com/pypa/pip/pull/7750/files) the pre-commit cache key
but pre-commit should more gracefully detect this in the [`healthy()`](https://github.com/pre-commit/pre-commit/blob/0a8ba31b9b6656d90f94fc368b47acb502cea44d/pre_commit/languages/python.py#L160-L168) function (which is designed to catch these sorts of system breakages)
</issue>
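
One way to make the health check notice a moved interpreter, in the spirit of the issue, is to resolve the virtualenv's own `python` binary path and execute that file directly rather than whatever `python` happens to be on `PATH`. The helper below is a standalone sketch; `env_python` and the assumed directory layout are for illustration only and are not pre-commit's internal API.

```python
import os
import subprocess
import sys


def env_python(envdir: str) -> str:
    """Path of the interpreter inside a virtualenv (Scripts/ on Windows, bin/ elsewhere)."""
    bin_part = 'Scripts' if sys.platform == 'win32' else 'bin'
    exe = 'python.exe' if sys.platform == 'win32' else 'python'
    return os.path.join(envdir, bin_part, exe)


def healthy(envdir: str) -> bool:
    """False when the env's interpreter is gone or can no longer import core modules."""
    py_exe = env_python(envdir)
    if not os.path.exists(py_exe):
        return False
    ret = subprocess.call(
        (py_exe, '-c', 'import ctypes, datetime, io, os, ssl, weakref'),
    )
    return ret == 0
```
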
<code>
[start of pre_commit/languages/python.py]
1 import contextlib
2 import functools
3 import os
4 import sys
5 from typing import Callable
6 from typing import ContextManager
7 from typing import Generator
8 from typing import Optional
9 from typing import Sequence
10 from typing import Tuple
11
12 import pre_commit.constants as C
13 from pre_commit.envcontext import envcontext
14 from pre_commit.envcontext import PatchesT
15 from pre_commit.envcontext import UNSET
16 from pre_commit.envcontext import Var
17 from pre_commit.hook import Hook
18 from pre_commit.languages import helpers
19 from pre_commit.parse_shebang import find_executable
20 from pre_commit.prefix import Prefix
21 from pre_commit.util import CalledProcessError
22 from pre_commit.util import clean_path_on_failure
23 from pre_commit.util import cmd_output
24 from pre_commit.util import cmd_output_b
25
26 ENVIRONMENT_DIR = 'py_env'
27
28
29 def bin_dir(venv: str) -> str:
30 """On windows there's a different directory for the virtualenv"""
31 bin_part = 'Scripts' if os.name == 'nt' else 'bin'
32 return os.path.join(venv, bin_part)
33
34
35 def get_env_patch(venv: str) -> PatchesT:
36 return (
37 ('PYTHONHOME', UNSET),
38 ('VIRTUAL_ENV', venv),
39 ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),
40 )
41
42
43 def _find_by_py_launcher(
44 version: str,
45 ) -> Optional[str]: # pragma: no cover (windows only)
46 if version.startswith('python'):
47 num = version[len('python'):]
48 try:
49 cmd = ('py', f'-{num}', '-c', 'import sys; print(sys.executable)')
50 return cmd_output(*cmd)[1].strip()
51 except CalledProcessError:
52 pass
53 return None
54
55
56 def _find_by_sys_executable() -> Optional[str]:
57 def _norm(path: str) -> Optional[str]:
58 _, exe = os.path.split(path.lower())
59 exe, _, _ = exe.partition('.exe')
60 if exe not in {'python', 'pythonw'} and find_executable(exe):
61 return exe
62 return None
63
64 # On linux, I see these common sys.executables:
65 #
66 # system `python`: /usr/bin/python -> python2.7
67 # system `python2`: /usr/bin/python2 -> python2.7
68 # virtualenv v: v/bin/python (will not return from this loop)
69 # virtualenv v -ppython2: v/bin/python -> python2
70 # virtualenv v -ppython2.7: v/bin/python -> python2.7
71 # virtualenv v -ppypy: v/bin/python -> v/bin/pypy
72 for path in (sys.executable, os.path.realpath(sys.executable)):
73 exe = _norm(path)
74 if exe:
75 return exe
76 return None
77
78
79 @functools.lru_cache(maxsize=1)
80 def get_default_version() -> str: # pragma: no cover (platform dependent)
81 # First attempt from `sys.executable` (or the realpath)
82 exe = _find_by_sys_executable()
83 if exe:
84 return exe
85
86 # Next try the `pythonX.X` executable
87 exe = f'python{sys.version_info[0]}.{sys.version_info[1]}'
88 if find_executable(exe):
89 return exe
90
91 if _find_by_py_launcher(exe):
92 return exe
93
94 # Give a best-effort try for windows
95 default_folder_name = exe.replace('.', '')
96 if os.path.exists(fr'C:\{default_folder_name}\python.exe'):
97 return exe
98
99 # We tried!
100 return C.DEFAULT
101
102
103 def _sys_executable_matches(version: str) -> bool:
104 if version == 'python':
105 return True
106 elif not version.startswith('python'):
107 return False
108
109 try:
110 info = tuple(int(p) for p in version[len('python'):].split('.'))
111 except ValueError:
112 return False
113
114 return sys.version_info[:len(info)] == info
115
116
117 def norm_version(version: str) -> str:
118 # first see if our current executable is appropriate
119 if _sys_executable_matches(version):
120 return sys.executable
121
122 if os.name == 'nt': # pragma: no cover (windows)
123 version_exec = _find_by_py_launcher(version)
124 if version_exec:
125 return version_exec
126
127 # Try looking up by name
128 version_exec = find_executable(version)
129 if version_exec and version_exec != version:
130 return version_exec
131
132 # If it is in the form pythonx.x search in the default
133 # place on windows
134 if version.startswith('python'):
135 default_folder_name = version.replace('.', '')
136 return fr'C:\{default_folder_name}\python.exe'
137
138 # Otherwise assume it is a path
139 return os.path.expanduser(version)
140
141
142 def py_interface(
143 _dir: str,
144 _make_venv: Callable[[str, str], None],
145 ) -> Tuple[
146 Callable[[Prefix, str], ContextManager[None]],
147 Callable[[Prefix, str], bool],
148 Callable[[Hook, Sequence[str], bool], Tuple[int, bytes]],
149 Callable[[Prefix, str, Sequence[str]], None],
150 ]:
151 @contextlib.contextmanager
152 def in_env(
153 prefix: Prefix,
154 language_version: str,
155 ) -> Generator[None, None, None]:
156 envdir = prefix.path(helpers.environment_dir(_dir, language_version))
157 with envcontext(get_env_patch(envdir)):
158 yield
159
160 def healthy(prefix: Prefix, language_version: str) -> bool:
161 with in_env(prefix, language_version):
162 retcode, _, _ = cmd_output_b(
163 'python', '-c',
164 'import ctypes, datetime, io, os, ssl, weakref',
165 cwd='/',
166 retcode=None,
167 )
168 return retcode == 0
169
170 def run_hook(
171 hook: Hook,
172 file_args: Sequence[str],
173 color: bool,
174 ) -> Tuple[int, bytes]:
175 with in_env(hook.prefix, hook.language_version):
176 return helpers.run_xargs(hook, hook.cmd, file_args, color=color)
177
178 def install_environment(
179 prefix: Prefix,
180 version: str,
181 additional_dependencies: Sequence[str],
182 ) -> None:
183 additional_dependencies = tuple(additional_dependencies)
184 directory = helpers.environment_dir(_dir, version)
185
186 env_dir = prefix.path(directory)
187 with clean_path_on_failure(env_dir):
188 if version != C.DEFAULT:
189 python = norm_version(version)
190 else:
191 python = os.path.realpath(sys.executable)
192 _make_venv(env_dir, python)
193 with in_env(prefix, version):
194 helpers.run_setup_cmd(
195 prefix, ('pip', 'install', '.') + additional_dependencies,
196 )
197
198 return in_env, healthy, run_hook, install_environment
199
200
201 def make_venv(envdir: str, python: str) -> None:
202 env = dict(os.environ, VIRTUALENV_NO_DOWNLOAD='1')
203 cmd = (sys.executable, '-mvirtualenv', envdir, '-p', python)
204 cmd_output_b(*cmd, env=env, cwd='/')
205
206
207 _interface = py_interface(ENVIRONMENT_DIR, make_venv)
208 in_env, healthy, run_hook, install_environment = _interface
209
[end of pre_commit/languages/python.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py
--- a/pre_commit/languages/python.py
+++ b/pre_commit/languages/python.py
@@ -158,10 +158,12 @@
yield
def healthy(prefix: Prefix, language_version: str) -> bool:
+ envdir = helpers.environment_dir(_dir, language_version)
+ exe_name = 'python.exe' if sys.platform == 'win32' else 'python'
+ py_exe = prefix.path(bin_dir(envdir), exe_name)
with in_env(prefix, language_version):
retcode, _, _ = cmd_output_b(
- 'python', '-c',
- 'import ctypes, datetime, io, os, ssl, weakref',
+ py_exe, '-c', 'import ctypes, datetime, io, os, ssl, weakref',
cwd='/',
retcode=None,
)
| {"golden_diff": "diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py\n--- a/pre_commit/languages/python.py\n+++ b/pre_commit/languages/python.py\n@@ -158,10 +158,12 @@\n yield\n \n def healthy(prefix: Prefix, language_version: str) -> bool:\n+ envdir = helpers.environment_dir(_dir, language_version)\n+ exe_name = 'python.exe' if sys.platform == 'win32' else 'python'\n+ py_exe = prefix.path(bin_dir(envdir), exe_name)\n with in_env(prefix, language_version):\n retcode, _, _ = cmd_output_b(\n- 'python', '-c',\n- 'import ctypes, datetime, io, os, ssl, weakref',\n+ py_exe, '-c', 'import ctypes, datetime, io, os, ssl, weakref',\n cwd='/',\n retcode=None,\n )\n", "issue": "cache + virtualenv>=20 / python_venv + moving executables -> File not found: ... python\nfailure mode looks something like this:\r\n\r\n```\r\nCheck for added large files..............................................Failed\r\n- hook id: check-added-large-files\r\n- exit code: 1\r\n\r\nExecutable `/Users/runner/.cache/pre-commit/repo14qw_y0i/py_env-python3.8/bin/python` not found\r\n```\r\n\r\ncurrently this is a common failure for github actions caches, there's ~2 ways to work around this:\r\n\r\n1. [add `$(which python)`](https://github.com/pre-commit/action/commit/ee269b64a608de770696d23079f46238c2f7ab5a) to the pre-commit cache key\r\n2. [manually bump](https://github.com/pypa/pip/pull/7750/files) the pre-commit cache key\r\n\r\nbut pre-commit should more gracefully detect this in the [`healthy()`](https://github.com/pre-commit/pre-commit/blob/0a8ba31b9b6656d90f94fc368b47acb502cea44d/pre_commit/languages/python.py#L160-L168) function (which is designed to catch these sorts of system breakages)\n", "before_files": [{"content": "import contextlib\nimport functools\nimport os\nimport sys\nfrom typing import Callable\nfrom typing import ContextManager\nfrom typing import Generator\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.parse_shebang import find_executable\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\n\nENVIRONMENT_DIR = 'py_env'\n\n\ndef bin_dir(venv: str) -> str:\n \"\"\"On windows there's a different directory for the virtualenv\"\"\"\n bin_part = 'Scripts' if os.name == 'nt' else 'bin'\n return os.path.join(venv, bin_part)\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n return (\n ('PYTHONHOME', UNSET),\n ('VIRTUAL_ENV', venv),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\ndef _find_by_py_launcher(\n version: str,\n) -> Optional[str]: # pragma: no cover (windows only)\n if version.startswith('python'):\n num = version[len('python'):]\n try:\n cmd = ('py', f'-{num}', '-c', 'import sys; print(sys.executable)')\n return cmd_output(*cmd)[1].strip()\n except CalledProcessError:\n pass\n return None\n\n\ndef _find_by_sys_executable() -> Optional[str]:\n def _norm(path: str) -> Optional[str]:\n _, exe = os.path.split(path.lower())\n exe, _, _ = exe.partition('.exe')\n if exe not in {'python', 'pythonw'} and find_executable(exe):\n return exe\n return None\n\n # On linux, I see 
these common sys.executables:\n #\n # system `python`: /usr/bin/python -> python2.7\n # system `python2`: /usr/bin/python2 -> python2.7\n # virtualenv v: v/bin/python (will not return from this loop)\n # virtualenv v -ppython2: v/bin/python -> python2\n # virtualenv v -ppython2.7: v/bin/python -> python2.7\n # virtualenv v -ppypy: v/bin/python -> v/bin/pypy\n for path in (sys.executable, os.path.realpath(sys.executable)):\n exe = _norm(path)\n if exe:\n return exe\n return None\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str: # pragma: no cover (platform dependent)\n # First attempt from `sys.executable` (or the realpath)\n exe = _find_by_sys_executable()\n if exe:\n return exe\n\n # Next try the `pythonX.X` executable\n exe = f'python{sys.version_info[0]}.{sys.version_info[1]}'\n if find_executable(exe):\n return exe\n\n if _find_by_py_launcher(exe):\n return exe\n\n # Give a best-effort try for windows\n default_folder_name = exe.replace('.', '')\n if os.path.exists(fr'C:\\{default_folder_name}\\python.exe'):\n return exe\n\n # We tried!\n return C.DEFAULT\n\n\ndef _sys_executable_matches(version: str) -> bool:\n if version == 'python':\n return True\n elif not version.startswith('python'):\n return False\n\n try:\n info = tuple(int(p) for p in version[len('python'):].split('.'))\n except ValueError:\n return False\n\n return sys.version_info[:len(info)] == info\n\n\ndef norm_version(version: str) -> str:\n # first see if our current executable is appropriate\n if _sys_executable_matches(version):\n return sys.executable\n\n if os.name == 'nt': # pragma: no cover (windows)\n version_exec = _find_by_py_launcher(version)\n if version_exec:\n return version_exec\n\n # Try looking up by name\n version_exec = find_executable(version)\n if version_exec and version_exec != version:\n return version_exec\n\n # If it is in the form pythonx.x search in the default\n # place on windows\n if version.startswith('python'):\n default_folder_name = version.replace('.', '')\n return fr'C:\\{default_folder_name}\\python.exe'\n\n # Otherwise assume it is a path\n return os.path.expanduser(version)\n\n\ndef py_interface(\n _dir: str,\n _make_venv: Callable[[str, str], None],\n) -> Tuple[\n Callable[[Prefix, str], ContextManager[None]],\n Callable[[Prefix, str], bool],\n Callable[[Hook, Sequence[str], bool], Tuple[int, bytes]],\n Callable[[Prefix, str, Sequence[str]], None],\n]:\n @contextlib.contextmanager\n def in_env(\n prefix: Prefix,\n language_version: str,\n ) -> Generator[None, None, None]:\n envdir = prefix.path(helpers.environment_dir(_dir, language_version))\n with envcontext(get_env_patch(envdir)):\n yield\n\n def healthy(prefix: Prefix, language_version: str) -> bool:\n with in_env(prefix, language_version):\n retcode, _, _ = cmd_output_b(\n 'python', '-c',\n 'import ctypes, datetime, io, os, ssl, weakref',\n cwd='/',\n retcode=None,\n )\n return retcode == 0\n\n def run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n ) -> Tuple[int, bytes]:\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n\n def install_environment(\n prefix: Prefix,\n version: str,\n additional_dependencies: Sequence[str],\n ) -> None:\n additional_dependencies = tuple(additional_dependencies)\n directory = helpers.environment_dir(_dir, version)\n\n env_dir = prefix.path(directory)\n with clean_path_on_failure(env_dir):\n if version != C.DEFAULT:\n python = norm_version(version)\n else:\n python = 
os.path.realpath(sys.executable)\n _make_venv(env_dir, python)\n with in_env(prefix, version):\n helpers.run_setup_cmd(\n prefix, ('pip', 'install', '.') + additional_dependencies,\n )\n\n return in_env, healthy, run_hook, install_environment\n\n\ndef make_venv(envdir: str, python: str) -> None:\n env = dict(os.environ, VIRTUALENV_NO_DOWNLOAD='1')\n cmd = (sys.executable, '-mvirtualenv', envdir, '-p', python)\n cmd_output_b(*cmd, env=env, cwd='/')\n\n\n_interface = py_interface(ENVIRONMENT_DIR, make_venv)\nin_env, healthy, run_hook, install_environment = _interface\n", "path": "pre_commit/languages/python.py"}]} | 2,935 | 200 |
gh_patches_debug_8479 | rasdani/github-patches | git_diff | spacetelescope__jwql-92 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update environment.yml to update Django version
When running the Django web server (on the `laurenmarietta/web-app-dev` branch) from the `jwql` environment on the VM, I had to update Django from 1.11.8 to the latest version (2.0.5) to get rid of an error with Django.
The version of Django in `environment.yml` should be specified as >=2.0.5 in the future.
</issue>
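
Since the request is just a version constraint, the change reduces to one entry in the requirements list. An illustrative pinned list is shown below; whether to use `>=2.0.5` (as the issue asks) or an exact `==` pin is a maintainer decision, and the remaining entries are copied from the existing `setup.py`.

```python
# Illustrative REQUIRES list with the Django entry constrained.
REQUIRES = [
    'astropy',
    'astroquery',
    'bokeh==0.12.5',
    'django>=2.0.5',
    'matplotlib',
    'numpy',
    'python-dateutil',
    'sphinx',
    'sphinx-automodapi',
    'sqlalchemy',
]
```
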
<code>
[start of setup.py]
1 import numpy as np
2 from setuptools import setup
3 from setuptools import find_packages
4
5 VERSION = '0.4.0'
6
7 AUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, Graham Kanarek'
8 AUTHORS += 'Johannes Sahlmann, Lauren Chambers, Catherine Martlin'
9
10 REQUIRES = ['astropy', 'astroquery', 'bokeh==0.12.5', 'django', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']
11
12 setup(
13 name='jwql',
14 version=VERSION,
15 description='The JWST Quicklook Project',
16 url='https://github.com/spacetelescope/jwql.git',
17 author=AUTHORS,
18 author_email='[email protected]',
19 license='BSD',
20 keywords=['astronomy', 'python'],
21 classifiers=['Programming Language :: Python'],
22 packages=find_packages(),
23 install_requires=REQUIRES,
24 include_package_data=True,
25 include_dirs=[np.get_include()],
26 )
27
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@
AUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, Graham Kanarek'
AUTHORS += 'Johannes Sahlmann, Lauren Chambers, Catherine Martlin'
-REQUIRES = ['astropy', 'astroquery', 'bokeh==0.12.5', 'django', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']
+REQUIRES = ['astropy', 'astroquery', 'bokeh==0.12.5', 'django==2.0.5', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']
setup(
name='jwql',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -7,7 +7,7 @@\n AUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, Graham Kanarek'\n AUTHORS += 'Johannes Sahlmann, Lauren Chambers, Catherine Martlin'\n \n-REQUIRES = ['astropy', 'astroquery', 'bokeh==0.12.5', 'django', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']\n+REQUIRES = ['astropy', 'astroquery', 'bokeh==0.12.5', 'django==2.0.5', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']\n \n setup(\n name='jwql',\n", "issue": "Update environment.yml to update Django version\nWhen running the Django web server (on the `laurenmarietta/web-app-dev branch`) from the `jwql` environment on the VM, and I had to update Django from 1.11.8 to the latest version (2.0.5) to get rid of an error with Django.\r\n\r\nThe version of Django in `environment.yml` should be specified to >=2.0.5 in the environment file in the future.\n", "before_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.4.0'\n\nAUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, Graham Kanarek'\nAUTHORS += 'Johannes Sahlmann, Lauren Chambers, Catherine Martlin'\n\nREQUIRES = ['astropy', 'astroquery', 'bokeh==0.12.5', 'django', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']\n\nsetup(\n name='jwql',\n version=VERSION,\n description='The JWST Quicklook Project',\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n include_package_data=True,\n include_dirs=[np.get_include()],\n )\n", "path": "setup.py"}]} | 912 | 201 |
gh_patches_debug_26629 | rasdani/github-patches | git_diff | GoogleCloudPlatform__PerfKitBenchmarker-73 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The cluster boot benchmark should the num_cpus function in parallel
The cluster boot benchmark has the following code:
> for vm in vms:
>     metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus,
>                 'machine_instance': vm_number}
>     value = vm.TimeToBoot()
This looks great until you realize vm.num_cpus is a method on the virtual machine which in turn calls RemoteCommand, leading to an ssh. When a large number of VMs boot, the result is a long set of serially run ssh calls to each VM. This could be done a lot faster by moving the code into a method and then using RunThreaded.
</issue>
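
A sketch of the threaded approach described above: pull the per-VM measurement into a helper so the `num_cpus` ssh and `TimeToBoot()` call happen inside worker threads, then fan the VMs out with the project's `RunThreaded` utility. The exact argument format passed to `RunThreaded` is assumed here for illustration.

```python
from perfkitbenchmarker import vm_util  # project threading helper; assumed importable


def _get_time_to_boot(vm, vm_index, result_list):
    # vm.num_cpus issues a RemoteCommand (ssh); running it here keeps that
    # ssh off the serial path when many VMs are measured at once.
    metadata = {'machine_type': vm.machine_type,
                'num_cpus': vm.num_cpus,
                'machine_instance': vm_index}
    value = vm.TimeToBoot()
    assert value is not None
    result_list.append(('Boot Time', value, 'seconds', metadata))


def run(benchmark_spec):
    samples = []
    vms = benchmark_spec.vms
    params = [((vm, i, samples), {}) for i, vm in enumerate(vms)]
    vm_util.RunThreaded(_get_time_to_boot, params)
    assert len(samples) == benchmark_spec.num_vms
    return samples
```
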
<code>
[start of perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py]
1 # Copyright 2014 Google Inc. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Runs a cluster boot benchmark."""
16
17 import logging
18
19 from perfkitbenchmarker import flags
20
21 FLAGS = flags.FLAGS
22 BENCHMARK_INFO = {'name': 'cluster boot',
23 'description': 'Create a cluster, record all times to boot',
24 'scratch_disk': False,
25 'num_machines': None} # Set in GetInfo()
26
27
28 def GetInfo():
29 BENCHMARK_INFO['num_machines'] = FLAGS.num_vms
30 return BENCHMARK_INFO
31
32
33 def Prepare(unused_benchmark_spec):
34 pass
35
36
37 def Run(benchmark_spec):
38 """Measure the boot time for all VMs.
39
40 Args:
41 benchmark_spec: The benchmark specification. Contains all data that is
42 required to run the benchmark.
43
44 Returns:
45 A list of samples in the form of 3 or 4 tuples. The tuples contain
46 the sample metric (string), value (float), and unit (string).
47 If a 4th element is included, it is a dictionary of sample
48 metadata.
49 """
50
51 samples = []
52 vm_number = 0
53 logging.info('Boot Results:')
54 vms = benchmark_spec.vms
55 for vm in vms:
56 metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus,
57 'machine_instance': vm_number}
58 value = vm.TimeToBoot()
59 assert value is not None
60 samples.append(('Boot Time', value, 'seconds', metadata))
61 vm_number += 1
62 logging.info(samples)
63 assert vm_number == benchmark_spec.num_vms
64 return samples
65
66
67 def Cleanup(unused_benchmark_spec):
68 pass
69
[end of perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py b/perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py
--- a/perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py
+++ b/perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py
@@ -17,6 +17,7 @@
import logging
from perfkitbenchmarker import flags
+from perfkitbenchmarker import vm_util
FLAGS = flags.FLAGS
BENCHMARK_INFO = {'name': 'cluster boot',
@@ -34,6 +35,14 @@
pass
+def _GetTimeToBoot(vm, vm_index, result_list):
+ metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus,
+ 'machine_instance': vm_index}
+ value = vm.TimeToBoot()
+ assert value is not None
+ result_list.append(('Boot Time', value, 'seconds', metadata))
+
+
def Run(benchmark_spec):
"""Measure the boot time for all VMs.
@@ -49,18 +58,12 @@
"""
samples = []
- vm_number = 0
logging.info('Boot Results:')
vms = benchmark_spec.vms
- for vm in vms:
- metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus,
- 'machine_instance': vm_number}
- value = vm.TimeToBoot()
- assert value is not None
- samples.append(('Boot Time', value, 'seconds', metadata))
- vm_number += 1
+ params = [((vm, i, samples), {}) for i, vm in enumerate(vms)]
+ vm_util.RunThreaded(_GetTimeToBoot, params)
logging.info(samples)
- assert vm_number == benchmark_spec.num_vms
+ assert len(samples) == benchmark_spec.num_vms
return samples
| {"golden_diff": "diff --git a/perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py b/perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py\n--- a/perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py\n+++ b/perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py\n@@ -17,6 +17,7 @@\n import logging\n \n from perfkitbenchmarker import flags\n+from perfkitbenchmarker import vm_util\n \n FLAGS = flags.FLAGS\n BENCHMARK_INFO = {'name': 'cluster boot',\n@@ -34,6 +35,14 @@\n pass\n \n \n+def _GetTimeToBoot(vm, vm_index, result_list):\n+ metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus,\n+ 'machine_instance': vm_index}\n+ value = vm.TimeToBoot()\n+ assert value is not None\n+ result_list.append(('Boot Time', value, 'seconds', metadata))\n+\n+\n def Run(benchmark_spec):\n \"\"\"Measure the boot time for all VMs.\n \n@@ -49,18 +58,12 @@\n \"\"\"\n \n samples = []\n- vm_number = 0\n logging.info('Boot Results:')\n vms = benchmark_spec.vms\n- for vm in vms:\n- metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus,\n- 'machine_instance': vm_number}\n- value = vm.TimeToBoot()\n- assert value is not None\n- samples.append(('Boot Time', value, 'seconds', metadata))\n- vm_number += 1\n+ params = [((vm, i, samples), {}) for i, vm in enumerate(vms)]\n+ vm_util.RunThreaded(_GetTimeToBoot, params)\n logging.info(samples)\n- assert vm_number == benchmark_spec.num_vms\n+ assert len(samples) == benchmark_spec.num_vms\n return samples\n", "issue": "The cluster boot benchmark should the num_cpus function in parallel\nThe cluster boot benchmark has the following code:\n\n> for vm in vms:\n> metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus,\n> 'machine_instance': vm_number}\n> value = vm.TimeToBoot()\n\nThis looks great until you realize vm.num_cpus is a method on the virtual machine which in turn calls RemoteCommand leading to an ssh. When large number of VM's boot the result is a long set of serially run ssh's to each VM. This could be done a lot faster by moving the code into a method and then using RunThreaded.\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Runs a cluster boot benchmark.\"\"\"\n\nimport logging\n\nfrom perfkitbenchmarker import flags\n\nFLAGS = flags.FLAGS\nBENCHMARK_INFO = {'name': 'cluster boot',\n 'description': 'Create a cluster, record all times to boot',\n 'scratch_disk': False,\n 'num_machines': None} # Set in GetInfo()\n\n\ndef GetInfo():\n BENCHMARK_INFO['num_machines'] = FLAGS.num_vms\n return BENCHMARK_INFO\n\n\ndef Prepare(unused_benchmark_spec):\n pass\n\n\ndef Run(benchmark_spec):\n \"\"\"Measure the boot time for all VMs.\n\n Args:\n benchmark_spec: The benchmark specification. Contains all data that is\n required to run the benchmark.\n\n Returns:\n A list of samples in the form of 3 or 4 tuples. 
The tuples contain\n the sample metric (string), value (float), and unit (string).\n If a 4th element is included, it is a dictionary of sample\n metadata.\n \"\"\"\n\n samples = []\n vm_number = 0\n logging.info('Boot Results:')\n vms = benchmark_spec.vms\n for vm in vms:\n metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus,\n 'machine_instance': vm_number}\n value = vm.TimeToBoot()\n assert value is not None\n samples.append(('Boot Time', value, 'seconds', metadata))\n vm_number += 1\n logging.info(samples)\n assert vm_number == benchmark_spec.num_vms\n return samples\n\n\ndef Cleanup(unused_benchmark_spec):\n pass\n", "path": "perfkitbenchmarker/benchmarks/cluster_boot_benchmark.py"}]} | 1,307 | 425 |
gh_patches_debug_7671 | rasdani/github-patches | git_diff | facebookresearch__mmf-159 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Minor bug in object detections in extract_features_vmb.py
## 🐛 Bug
I believe this was introduced by #127, but there's a slight numpy indexing issue that causes the objects to still be detected incorrectly. 
The line in question is [here](https://github.com/facebookresearch/pythia/blob/12f67cd4f67499814bb0b3665ff14dd635800f63/pythia/scripts/features/extract_features_vmb.py#L165). It currently reads
```python
objects = torch.argmax(scores[keep_boxes][start_index:], dim=1)
```
However, `scores` is a tensor of `[num_objects, object_classes]`, so `start_index` should be indexing the second dimension. The updated line should be
```python
objects = torch.argmax(scores[keep_boxes][:, start_index:], dim=1)
```
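
For illustration, here is a small self-contained sketch (the score values below are made up) of why the two forms differ: the first form slices away *rows*, i.e. drops boxes, while the second slices away the background *column*:

```python
import torch

# made-up [num_boxes, num_classes] score matrix; column 0 is the background class
scores = torch.tensor([[0.90, 0.06, 0.04],
                       [0.10, 0.20, 0.70]])
start_index = 1

buggy = torch.argmax(scores[start_index:], dim=1)     # drops box 0 -> shape [1]
fixed = torch.argmax(scores[:, start_index:], dim=1)  # drops background column -> shape [2]

print(buggy.shape, fixed.shape)  # torch.Size([1]) torch.Size([2])
```
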
I can submit a pull request.
</issue>
<code>
[start of pythia/scripts/features/extract_features_vmb.py]
1 # Requires vqa-maskrcnn-benchmark to be built and installed
2 # Category mapping for visual genome can be downloaded from
3 # https://dl.fbaipublicfiles.com/pythia/data/visual_genome_categories.json
4 # When the --background flag is set, the index saved with key "objects" in
5 # info_list will be +1 of the Visual Genome category mapping above and 0
6 # is the background class. When the --background flag is not set, the
7 # index saved with key "objects" in info list will match the Visual Genome
8 # category mapping.
9 import argparse
10 import glob
11 import os
12
13 import cv2
14 import numpy as np
15 import torch
16 from PIL import Image
17
18 from maskrcnn_benchmark.config import cfg
19 from maskrcnn_benchmark.layers import nms
20 from maskrcnn_benchmark.modeling.detector import build_detection_model
21 from maskrcnn_benchmark.structures.image_list import to_image_list
22 from maskrcnn_benchmark.utils.model_serialization import load_state_dict
23 from pythia.utils.general import download_file
24
25
26 class FeatureExtractor:
27 MODEL_URL = (
28 "https://dl.fbaipublicfiles.com/pythia/detectron_model/detectron_model.pth"
29 )
30 CONFIG_URL = (
31 "https://dl.fbaipublicfiles.com/pythia/detectron_model/detectron_model.yaml"
32 )
33 MAX_SIZE = 1333
34 MIN_SIZE = 800
35
36 def __init__(self):
37 self.args = self.get_parser().parse_args()
38 self.detection_model = self._build_detection_model()
39
40 os.makedirs(self.args.output_folder, exist_ok=True)
41
42 def _try_downloading_necessities(self):
43 if self.args.model_file is None:
44 print("Downloading model and configuration")
45 self.args.model_file = self.MODEL_URL.split("/")[-1]
46 self.args.config_file = self.CONFIG_URL.split("/")[-1]
47 download_file(self.MODEL_URL)
48 download_file(self.CONFIG_URL)
49
50 def get_parser(self):
51 parser = argparse.ArgumentParser()
52 parser.add_argument(
53 "--model_file", default=None, type=str, help="Detectron model file"
54 )
55 parser.add_argument(
56 "--config_file", default=None, type=str, help="Detectron config file"
57 )
58 parser.add_argument("--batch_size", type=int, default=2, help="Batch size")
59 parser.add_argument(
60 "--num_features", type=int, default=100, help="Number of features to extract."
61 )
62 parser.add_argument(
63 "--output_folder", type=str, default="./output", help="Output folder"
64 )
65 parser.add_argument("--image_dir", type=str, help="Image directory or file")
66 parser.add_argument(
67 "--feature_name", type=str, help="The name of the feature to extract",
68 default="fc6",
69 )
70 parser.add_argument(
71 "--confidence_threshold", type=float, default=0,
72 help="Threshold of detection confidence above which boxes will be selected"
73 )
74 parser.add_argument(
75 "--background", action="store_true",
76 help="The model will output predictions for the background class when set"
77 )
78 return parser
79
80 def _build_detection_model(self):
81 cfg.merge_from_file(self.args.config_file)
82 cfg.freeze()
83
84 model = build_detection_model(cfg)
85 checkpoint = torch.load(self.args.model_file, map_location=torch.device("cpu"))
86
87 load_state_dict(model, checkpoint.pop("model"))
88
89 model.to("cuda")
90 model.eval()
91 return model
92
93 def _image_transform(self, path):
94 img = Image.open(path)
95 im = np.array(img).astype(np.float32)
96 # IndexError: too many indices for array, grayscale images
97 if len(im.shape) < 3:
98 im = np.repeat(im[:, :, np.newaxis], 3, axis=2)
99 im = im[:, :, ::-1]
100 im -= np.array([102.9801, 115.9465, 122.7717])
101 im_shape = im.shape
102 im_height = im_shape[0]
103 im_width = im_shape[1]
104 im_size_min = np.min(im_shape[0:2])
105 im_size_max = np.max(im_shape[0:2])
106
107 # Scale based on minimum size
108 im_scale = self.MIN_SIZE / im_size_min
109
110 # Prevent the biggest axis from being more than max_size
111 # If bigger, scale it down
112 if np.round(im_scale * im_size_max) > self.MAX_SIZE:
113 im_scale = self.MAX_SIZE / im_size_max
114
115 im = cv2.resize(
116 im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR
117 )
118 img = torch.from_numpy(im).permute(2, 0, 1)
119
120 im_info = {
121 "width": im_width,
122 "height": im_height
123 }
124
125 return img, im_scale, im_info
126
127 def _process_feature_extraction(
128 self, output, im_scales, im_infos, feature_name="fc6", conf_thresh=0
129 ):
130 batch_size = len(output[0]["proposals"])
131 n_boxes_per_image = [len(boxes) for boxes in output[0]["proposals"]]
132 score_list = output[0]["scores"].split(n_boxes_per_image)
133 score_list = [torch.nn.functional.softmax(x, -1) for x in score_list]
134 feats = output[0][feature_name].split(n_boxes_per_image)
135 cur_device = score_list[0].device
136
137 feat_list = []
138 info_list = []
139
140 for i in range(batch_size):
141 dets = output[0]["proposals"][i].bbox / im_scales[i]
142 scores = score_list[i]
143 max_conf = torch.zeros((scores.shape[0])).to(cur_device)
144 conf_thresh_tensor = torch.full_like(max_conf, conf_thresh)
145 start_index = 1
146 # Column 0 of the scores matrix is for the background class
147 if self.args.background:
148 start_index = 0
149 for cls_ind in range(start_index, scores.shape[1]):
150 cls_scores = scores[:, cls_ind]
151 keep = nms(dets, cls_scores, 0.5)
152 max_conf[keep] = torch.where(
153 # Better than max one till now and minimally greater than conf_thresh
154 (cls_scores[keep] > max_conf[keep]) &
155 (cls_scores[keep] > conf_thresh_tensor[keep]),
156 cls_scores[keep], max_conf[keep]
157 )
158
159 sorted_scores, sorted_indices = torch.sort(max_conf, descending=True)
160 num_boxes = (sorted_scores[:self.args.num_features] != 0).sum()
161 keep_boxes = sorted_indices[:self.args.num_features]
162 feat_list.append(feats[i][keep_boxes])
163 bbox = output[0]["proposals"][i][keep_boxes].bbox / im_scales[i]
164 # Predict the class label using the scores
165 objects = torch.argmax(scores[keep_boxes][start_index:], dim=1)
166
167 info_list.append(
168 {
169 "bbox": bbox.cpu().numpy(),
170 "num_boxes": num_boxes.item(),
171 "objects": objects.cpu().numpy(),
172 "image_width": im_infos[i]["width"],
173 "image_height": im_infos[i]["height"],
174 }
175 )
176
177 return feat_list, info_list
178
179 def get_detectron_features(self, image_paths):
180 img_tensor, im_scales, im_infos = [], [], []
181
182 for image_path in image_paths:
183 im, im_scale, im_info = self._image_transform(image_path)
184 img_tensor.append(im)
185 im_scales.append(im_scale)
186 im_infos.append(im_info)
187
188 # Image dimensions should be divisible by 32, to allow convolutions
189 # in detector to work
190 current_img_list = to_image_list(img_tensor, size_divisible=32)
191 current_img_list = current_img_list.to("cuda")
192
193 with torch.no_grad():
194 output = self.detection_model(current_img_list)
195
196 feat_list = self._process_feature_extraction(
197 output, im_scales, im_infos, self.args.feature_name,
198 self.args.confidence_threshold
199 )
200
201 return feat_list
202
203 def _chunks(self, array, chunk_size):
204 for i in range(0, len(array), chunk_size):
205 yield array[i : i + chunk_size]
206
207 def _save_feature(self, file_name, feature, info):
208 file_base_name = os.path.basename(file_name)
209 file_base_name = file_base_name.split(".")[0]
210 info_file_base_name = file_base_name + "_info.npy"
211 file_base_name = file_base_name + ".npy"
212
213 np.save(
214 os.path.join(self.args.output_folder, file_base_name), feature.cpu().numpy()
215 )
216 np.save(os.path.join(self.args.output_folder, info_file_base_name), info)
217
218 def extract_features(self):
219 image_dir = self.args.image_dir
220
221 if os.path.isfile(image_dir):
222 features, infos = self.get_detectron_features([image_dir])
223 self._save_feature(image_dir, features[0], infos[0])
224 else:
225 files = glob.glob(os.path.join(image_dir, "*.jpg"))
226 for chunk in self._chunks(files, self.args.batch_size):
227 features, infos = self.get_detectron_features(chunk)
228 for idx, file_name in enumerate(chunk):
229 self._save_feature(file_name, features[idx], infos[idx])
230
231
232 if __name__ == "__main__":
233 feature_extractor = FeatureExtractor()
234 feature_extractor.extract_features()
235
[end of pythia/scripts/features/extract_features_vmb.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pythia/scripts/features/extract_features_vmb.py b/pythia/scripts/features/extract_features_vmb.py
--- a/pythia/scripts/features/extract_features_vmb.py
+++ b/pythia/scripts/features/extract_features_vmb.py
@@ -162,7 +162,7 @@
feat_list.append(feats[i][keep_boxes])
bbox = output[0]["proposals"][i][keep_boxes].bbox / im_scales[i]
# Predict the class label using the scores
- objects = torch.argmax(scores[keep_boxes][start_index:], dim=1)
+ objects = torch.argmax(scores[keep_boxes][:, start_index:], dim=1)
info_list.append(
{
| {"golden_diff": "diff --git a/pythia/scripts/features/extract_features_vmb.py b/pythia/scripts/features/extract_features_vmb.py\n--- a/pythia/scripts/features/extract_features_vmb.py\n+++ b/pythia/scripts/features/extract_features_vmb.py\n@@ -162,7 +162,7 @@\n feat_list.append(feats[i][keep_boxes])\n bbox = output[0][\"proposals\"][i][keep_boxes].bbox / im_scales[i]\n # Predict the class label using the scores\n- objects = torch.argmax(scores[keep_boxes][start_index:], dim=1)\n+ objects = torch.argmax(scores[keep_boxes][:, start_index:], dim=1)\n \n info_list.append(\n {\n", "issue": "Minor bug in object detections in extract_features_vmb.py\n## \ud83d\udc1b Bug\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\nI believe this was introduced by the #127, but there's a slight numpy indexing issue that causes the objects to still be detected incorrectly. \r\n\r\nThe line in question is [here](https://github.com/facebookresearch/pythia/blob/12f67cd4f67499814bb0b3665ff14dd635800f63/pythia/scripts/features/extract_features_vmb.py#L165). It currently reads\r\n\r\n```python\r\nobjects = torch.argmax(scores[keep_boxes][start_index:], dim=1)\r\n```\r\n\r\nHowever, `scores` is a tensor of `[num_objects, object_classes]`, so `start_index` should be indexing the second dimension. The updated line should be\r\n\r\n```python\r\nobjects = torch.argmax(scores[keep_boxes][:, start_index:], dim=1)\r\n```\r\n\r\nI can submit a pull request.\n", "before_files": [{"content": "# Requires vqa-maskrcnn-benchmark to be built and installed\n# Category mapping for visual genome can be downloaded from\n# https://dl.fbaipublicfiles.com/pythia/data/visual_genome_categories.json\n# When the --background flag is set, the index saved with key \"objects\" in\n# info_list will be +1 of the Visual Genome category mapping above and 0\n# is the background class. 
When the --background flag is not set, the\n# index saved with key \"objects\" in info list will match the Visual Genome\n# category mapping.\nimport argparse\nimport glob\nimport os\n\nimport cv2\nimport numpy as np\nimport torch\nfrom PIL import Image\n\nfrom maskrcnn_benchmark.config import cfg\nfrom maskrcnn_benchmark.layers import nms\nfrom maskrcnn_benchmark.modeling.detector import build_detection_model\nfrom maskrcnn_benchmark.structures.image_list import to_image_list\nfrom maskrcnn_benchmark.utils.model_serialization import load_state_dict\nfrom pythia.utils.general import download_file\n\n\nclass FeatureExtractor:\n MODEL_URL = (\n \"https://dl.fbaipublicfiles.com/pythia/detectron_model/detectron_model.pth\"\n )\n CONFIG_URL = (\n \"https://dl.fbaipublicfiles.com/pythia/detectron_model/detectron_model.yaml\"\n )\n MAX_SIZE = 1333\n MIN_SIZE = 800\n\n def __init__(self):\n self.args = self.get_parser().parse_args()\n self.detection_model = self._build_detection_model()\n\n os.makedirs(self.args.output_folder, exist_ok=True)\n\n def _try_downloading_necessities(self):\n if self.args.model_file is None:\n print(\"Downloading model and configuration\")\n self.args.model_file = self.MODEL_URL.split(\"/\")[-1]\n self.args.config_file = self.CONFIG_URL.split(\"/\")[-1]\n download_file(self.MODEL_URL)\n download_file(self.CONFIG_URL)\n\n def get_parser(self):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--model_file\", default=None, type=str, help=\"Detectron model file\"\n )\n parser.add_argument(\n \"--config_file\", default=None, type=str, help=\"Detectron config file\"\n )\n parser.add_argument(\"--batch_size\", type=int, default=2, help=\"Batch size\")\n parser.add_argument(\n \"--num_features\", type=int, default=100, help=\"Number of features to extract.\"\n )\n parser.add_argument(\n \"--output_folder\", type=str, default=\"./output\", help=\"Output folder\"\n )\n parser.add_argument(\"--image_dir\", type=str, help=\"Image directory or file\")\n parser.add_argument(\n \"--feature_name\", type=str, help=\"The name of the feature to extract\",\n default=\"fc6\",\n )\n parser.add_argument(\n \"--confidence_threshold\", type=float, default=0,\n help=\"Threshold of detection confidence above which boxes will be selected\"\n )\n parser.add_argument(\n \"--background\", action=\"store_true\",\n help=\"The model will output predictions for the background class when set\"\n )\n return parser\n\n def _build_detection_model(self):\n cfg.merge_from_file(self.args.config_file)\n cfg.freeze()\n\n model = build_detection_model(cfg)\n checkpoint = torch.load(self.args.model_file, map_location=torch.device(\"cpu\"))\n\n load_state_dict(model, checkpoint.pop(\"model\"))\n\n model.to(\"cuda\")\n model.eval()\n return model\n\n def _image_transform(self, path):\n img = Image.open(path)\n im = np.array(img).astype(np.float32)\n # IndexError: too many indices for array, grayscale images\n if len(im.shape) < 3:\n im = np.repeat(im[:, :, np.newaxis], 3, axis=2)\n im = im[:, :, ::-1]\n im -= np.array([102.9801, 115.9465, 122.7717])\n im_shape = im.shape\n im_height = im_shape[0]\n im_width = im_shape[1]\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n\n # Scale based on minimum size\n im_scale = self.MIN_SIZE / im_size_min\n\n # Prevent the biggest axis from being more than max_size\n # If bigger, scale it down\n if np.round(im_scale * im_size_max) > self.MAX_SIZE:\n im_scale = self.MAX_SIZE / im_size_max\n\n im = cv2.resize(\n im, None, None, fx=im_scale, 
fy=im_scale, interpolation=cv2.INTER_LINEAR\n )\n img = torch.from_numpy(im).permute(2, 0, 1)\n\n im_info = {\n \"width\": im_width,\n \"height\": im_height\n }\n\n return img, im_scale, im_info\n\n def _process_feature_extraction(\n self, output, im_scales, im_infos, feature_name=\"fc6\", conf_thresh=0\n ):\n batch_size = len(output[0][\"proposals\"])\n n_boxes_per_image = [len(boxes) for boxes in output[0][\"proposals\"]]\n score_list = output[0][\"scores\"].split(n_boxes_per_image)\n score_list = [torch.nn.functional.softmax(x, -1) for x in score_list]\n feats = output[0][feature_name].split(n_boxes_per_image)\n cur_device = score_list[0].device\n\n feat_list = []\n info_list = []\n\n for i in range(batch_size):\n dets = output[0][\"proposals\"][i].bbox / im_scales[i]\n scores = score_list[i]\n max_conf = torch.zeros((scores.shape[0])).to(cur_device)\n conf_thresh_tensor = torch.full_like(max_conf, conf_thresh)\n start_index = 1\n # Column 0 of the scores matrix is for the background class\n if self.args.background:\n start_index = 0\n for cls_ind in range(start_index, scores.shape[1]):\n cls_scores = scores[:, cls_ind]\n keep = nms(dets, cls_scores, 0.5)\n max_conf[keep] = torch.where(\n # Better than max one till now and minimally greater than conf_thresh\n (cls_scores[keep] > max_conf[keep]) &\n (cls_scores[keep] > conf_thresh_tensor[keep]),\n cls_scores[keep], max_conf[keep]\n )\n\n sorted_scores, sorted_indices = torch.sort(max_conf, descending=True)\n num_boxes = (sorted_scores[:self.args.num_features] != 0).sum()\n keep_boxes = sorted_indices[:self.args.num_features]\n feat_list.append(feats[i][keep_boxes])\n bbox = output[0][\"proposals\"][i][keep_boxes].bbox / im_scales[i]\n # Predict the class label using the scores\n objects = torch.argmax(scores[keep_boxes][start_index:], dim=1)\n\n info_list.append(\n {\n \"bbox\": bbox.cpu().numpy(),\n \"num_boxes\": num_boxes.item(),\n \"objects\": objects.cpu().numpy(),\n \"image_width\": im_infos[i][\"width\"],\n \"image_height\": im_infos[i][\"height\"],\n }\n )\n\n return feat_list, info_list\n\n def get_detectron_features(self, image_paths):\n img_tensor, im_scales, im_infos = [], [], []\n\n for image_path in image_paths:\n im, im_scale, im_info = self._image_transform(image_path)\n img_tensor.append(im)\n im_scales.append(im_scale)\n im_infos.append(im_info)\n\n # Image dimensions should be divisible by 32, to allow convolutions\n # in detector to work\n current_img_list = to_image_list(img_tensor, size_divisible=32)\n current_img_list = current_img_list.to(\"cuda\")\n\n with torch.no_grad():\n output = self.detection_model(current_img_list)\n\n feat_list = self._process_feature_extraction(\n output, im_scales, im_infos, self.args.feature_name,\n self.args.confidence_threshold\n )\n\n return feat_list\n\n def _chunks(self, array, chunk_size):\n for i in range(0, len(array), chunk_size):\n yield array[i : i + chunk_size]\n\n def _save_feature(self, file_name, feature, info):\n file_base_name = os.path.basename(file_name)\n file_base_name = file_base_name.split(\".\")[0]\n info_file_base_name = file_base_name + \"_info.npy\"\n file_base_name = file_base_name + \".npy\"\n\n np.save(\n os.path.join(self.args.output_folder, file_base_name), feature.cpu().numpy()\n )\n np.save(os.path.join(self.args.output_folder, info_file_base_name), info)\n\n def extract_features(self):\n image_dir = self.args.image_dir\n\n if os.path.isfile(image_dir):\n features, infos = self.get_detectron_features([image_dir])\n self._save_feature(image_dir, 
features[0], infos[0])\n else:\n files = glob.glob(os.path.join(image_dir, \"*.jpg\"))\n for chunk in self._chunks(files, self.args.batch_size):\n features, infos = self.get_detectron_features(chunk)\n for idx, file_name in enumerate(chunk):\n self._save_feature(file_name, features[idx], infos[idx])\n\n\nif __name__ == \"__main__\":\n feature_extractor = FeatureExtractor()\n feature_extractor.extract_features()\n", "path": "pythia/scripts/features/extract_features_vmb.py"}]} | 3,455 | 159 |
gh_patches_debug_33379 | rasdani/github-patches | git_diff | nv-legate__cunumeric-272 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Handle ufunc coverage wrappers more robustly
ref: https://github.com/nv-legate/cunumeric/pull/268/files#r846513290
As noted in the above conversation, the generic callable wrapping for adding coverage reporting is not sufficient.
Numpy (and thus cunumeric) `ufunc` are objects with their own API (https://numpy.org/doc/stable/reference/ufuncs.html) and just using a plain function wrapper makes those methods invisible. Some requirements to decide first:
* Do all the methods of a `ufunc` need to be included in coverage reporting? Or just its `__call__`?
If yes, we will need to resort to a wrapping object (and then: is it sufficient to just create a purpose-built `ufunc_wrapper` or do we need a generic forwarding wrapper?)
If not, we may be able to just wrap and replace `__call__` using the function wrappers similar to the existing ones.
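
For the wrapping-object route, a purpose-built forwarding wrapper could look roughly like the sketch below. Names such as `ufunc_wrapper` and `report` are illustrative only, not existing cunumeric API:

```python
class ufunc_wrapper:
    """Hypothetical sketch: report __call__, forward everything else."""

    def __init__(self, wrapped, report):
        self._wrapped = wrapped  # the real numpy/cunumeric ufunc
        self._report = report    # callable that records the API call

    def __call__(self, *args, **kwargs):
        self._report(self._wrapped.__name__)
        return self._wrapped(*args, **kwargs)

    def __getattr__(self, name):
        # keeps .reduce, .accumulate, .outer, .nin, .nout, ... visible,
        # but they are not individually reported in this sketch
        return getattr(self._wrapped, name)
```

Whether the forwarded methods (`reduce`, `accumulate`, `outer`, ...) also need their own reporting is exactly the open question above.
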
</issue>
<code>
[start of cunumeric/coverage.py]
1 # Copyright 2021-2022 NVIDIA Corporation
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 from __future__ import annotations
16
17 import warnings
18 from functools import wraps
19 from types import FunctionType, MethodDescriptorType, MethodType, ModuleType
20 from typing import Any, Callable, Container, Optional, cast
21
22 from typing_extensions import Protocol
23
24 from .runtime import runtime
25 from .utils import find_last_user_frames, find_last_user_stacklevel
26
27 __all__ = ("clone_module",)
28
29 FALLBACK_WARNING = (
30 "cuNumeric has not implemented {name} "
31 + "and is falling back to canonical numpy. "
32 + "You may notice significantly decreased performance "
33 + "for this function call."
34 )
35
36 MOD_INTERNAL = {"__dir__", "__getattr__"}
37
38 NDARRAY_INTERNAL = {
39 "__array_finalize__",
40 "__array_function__",
41 "__array_interface__",
42 "__array_prepare__",
43 "__array_priority__",
44 "__array_struct__",
45 "__array_ufunc__",
46 "__array_wrap__",
47 }
48
49
50 def filter_namespace(
51 ns: dict[str, Any],
52 *,
53 omit_names: Optional[Container[str]] = None,
54 omit_types: tuple[type, ...] = (),
55 ) -> dict[str, Any]:
56 omit_names = omit_names or set()
57 return {
58 attr: value
59 for attr, value in ns.items()
60 if attr not in omit_names and not isinstance(value, omit_types)
61 }
62
63
64 class AnyCallable(Protocol):
65 def __call__(self, *args: Any, **kwargs: Any) -> Any:
66 ...
67
68
69 class CuWrapped(Protocol):
70 _cunumeric_implemented: bool
71
72 def __call__(self, *args: Any, **kwargs: Any) -> Any:
73 ...
74
75
76 def implemented(
77 func: AnyCallable, prefix: str, name: str, *, reporting: bool = True
78 ) -> CuWrapped:
79 name = f"{prefix}.{name}"
80
81 wrapper: CuWrapped
82
83 if reporting:
84
85 @wraps(func)
86 def wrapper(*args: Any, **kwargs: Any) -> Any:
87 location = find_last_user_frames(not runtime.report_dump_callstack)
88 runtime.record_api_call(
89 name=name, location=location, implemented=True
90 )
91 return func(*args, **kwargs)
92
93 else:
94
95 wrapper = cast(CuWrapped, func)
96
97 wrapper._cunumeric_implemented = True
98
99 return wrapper
100
101
102 def unimplemented(
103 func: AnyCallable, prefix: str, name: str, *, reporting: bool = True
104 ) -> CuWrapped:
105 name = f"{prefix}.{name}"
106
107 wrapper: CuWrapped
108
109 if reporting:
110
111 @wraps(func)
112 def wrapper(*args: Any, **kwargs: Any) -> Any:
113 location = find_last_user_frames(not runtime.report_dump_callstack)
114 runtime.record_api_call(
115 name=name, location=location, implemented=False
116 )
117 return func(*args, **kwargs)
118
119 else:
120
121 @wraps(func)
122 def wrapper(*args: Any, **kwargs: Any) -> Any:
123 stacklevel = find_last_user_stacklevel()
124 warnings.warn(
125 FALLBACK_WARNING.format(name=name),
126 stacklevel=stacklevel,
127 category=RuntimeWarning,
128 )
129 return func(*args, **kwargs)
130
131 wrapper._cunumeric_implemented = False
132
133 return wrapper
134
135
136 def clone_module(
137 origin_module: ModuleType, new_globals: dict[str, Any]
138 ) -> None:
139 """Copy attributes from one module to another, excluding submodules
140
141 Function types are wrapped with a decorator to report API calls. All
142 other values are copied as-is.
143
144 Parameters
145 ----------
146 origin_module : ModuleTpe
147 Existing module to clone attributes from
148
149 new_globals : dict
150 a globals() dict for the new module to clone into
151
152 Returns
153 -------
154 None
155
156 """
157 mod_name = origin_module.__name__
158
159 missing = filter_namespace(
160 origin_module.__dict__,
161 omit_names=set(new_globals).union(MOD_INTERNAL),
162 omit_types=(ModuleType,),
163 )
164
165 from numpy import ufunc as npufunc
166
167 from ._ufunc.ufunc import ufunc as lgufunc
168
169 reporting = runtime.report_coverage
170
171 for attr, value in new_globals.items():
172 if isinstance(value, (FunctionType, lgufunc)):
173 wrapped = implemented(
174 cast(AnyCallable, value), mod_name, attr, reporting=reporting
175 )
176 new_globals[attr] = wrapped
177
178 for attr, value in missing.items():
179 if isinstance(value, (FunctionType, npufunc)):
180 wrapped = unimplemented(value, mod_name, attr, reporting=reporting)
181 new_globals[attr] = wrapped
182 else:
183 new_globals[attr] = value
184
185
186 def clone_class(origin_class: type) -> Callable[[type], type]:
187 """Copy attributes from one class to another
188
189 Method types are wrapped with a decorator to report API calls. All
190 other values are copied as-is.
191
192 Parameters
193 ----------
194 origin_class : type
195 Existing class type to clone attributes from
196
197 """
198
199 def should_wrap(obj: object) -> bool:
200 return isinstance(
201 obj, (FunctionType, MethodType, MethodDescriptorType)
202 )
203
204 def decorator(cls: type) -> type:
205 class_name = f"{origin_class.__module__}.{origin_class.__name__}"
206
207 missing = filter_namespace(
208 origin_class.__dict__,
209 # this simply omits ndarray internal methods for any class. If
210 # we ever need to wrap more classes we may need to generalize to
211 # per-class specification of internal names to skip
212 omit_names=set(cls.__dict__).union(NDARRAY_INTERNAL),
213 )
214
215 reporting = runtime.report_coverage
216
217 for attr, value in cls.__dict__.items():
218 if should_wrap(value):
219 wrapped = implemented(
220 value, class_name, attr, reporting=reporting
221 )
222 setattr(cls, attr, wrapped)
223
224 for attr, value in missing.items():
225 if should_wrap(value):
226 wrapped = unimplemented(
227 value, class_name, attr, reporting=reporting
228 )
229 setattr(cls, attr, wrapped)
230 else:
231 setattr(cls, attr, value)
232
233 return cls
234
235 return decorator
236
[end of cunumeric/coverage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cunumeric/coverage.py b/cunumeric/coverage.py
--- a/cunumeric/coverage.py
+++ b/cunumeric/coverage.py
@@ -24,7 +24,7 @@
from .runtime import runtime
from .utils import find_last_user_frames, find_last_user_stacklevel
-__all__ = ("clone_module",)
+__all__ = ("clone_class", "clone_module")
FALLBACK_WARNING = (
"cuNumeric has not implemented {name} "
@@ -66,12 +66,9 @@
...
-class CuWrapped(Protocol):
+class CuWrapped(AnyCallable, Protocol):
_cunumeric_implemented: bool
- def __call__(self, *args: Any, **kwargs: Any) -> Any:
- ...
-
def implemented(
func: AnyCallable, prefix: str, name: str, *, reporting: bool = True
@@ -92,7 +89,9 @@
else:
- wrapper = cast(CuWrapped, func)
+ @wraps(func)
+ def wrapper(*args: Any, **kwargs: Any) -> Any:
+ return func(*args, **kwargs)
wrapper._cunumeric_implemented = True
@@ -162,12 +161,10 @@
omit_types=(ModuleType,),
)
- from numpy import ufunc as npufunc
+ reporting = runtime.report_coverage
from ._ufunc.ufunc import ufunc as lgufunc
- reporting = runtime.report_coverage
-
for attr, value in new_globals.items():
if isinstance(value, (FunctionType, lgufunc)):
wrapped = implemented(
@@ -175,6 +172,8 @@
)
new_globals[attr] = wrapped
+ from numpy import ufunc as npufunc
+
for attr, value in missing.items():
if isinstance(value, (FunctionType, npufunc)):
wrapped = unimplemented(value, mod_name, attr, reporting=reporting)
| {"golden_diff": "diff --git a/cunumeric/coverage.py b/cunumeric/coverage.py\n--- a/cunumeric/coverage.py\n+++ b/cunumeric/coverage.py\n@@ -24,7 +24,7 @@\n from .runtime import runtime\n from .utils import find_last_user_frames, find_last_user_stacklevel\n \n-__all__ = (\"clone_module\",)\n+__all__ = (\"clone_class\", \"clone_module\")\n \n FALLBACK_WARNING = (\n \"cuNumeric has not implemented {name} \"\n@@ -66,12 +66,9 @@\n ...\n \n \n-class CuWrapped(Protocol):\n+class CuWrapped(AnyCallable, Protocol):\n _cunumeric_implemented: bool\n \n- def __call__(self, *args: Any, **kwargs: Any) -> Any:\n- ...\n-\n \n def implemented(\n func: AnyCallable, prefix: str, name: str, *, reporting: bool = True\n@@ -92,7 +89,9 @@\n \n else:\n \n- wrapper = cast(CuWrapped, func)\n+ @wraps(func)\n+ def wrapper(*args: Any, **kwargs: Any) -> Any:\n+ return func(*args, **kwargs)\n \n wrapper._cunumeric_implemented = True\n \n@@ -162,12 +161,10 @@\n omit_types=(ModuleType,),\n )\n \n- from numpy import ufunc as npufunc\n+ reporting = runtime.report_coverage\n \n from ._ufunc.ufunc import ufunc as lgufunc\n \n- reporting = runtime.report_coverage\n-\n for attr, value in new_globals.items():\n if isinstance(value, (FunctionType, lgufunc)):\n wrapped = implemented(\n@@ -175,6 +172,8 @@\n )\n new_globals[attr] = wrapped\n \n+ from numpy import ufunc as npufunc\n+\n for attr, value in missing.items():\n if isinstance(value, (FunctionType, npufunc)):\n wrapped = unimplemented(value, mod_name, attr, reporting=reporting)\n", "issue": "Handle ufunc coverage wrappers more robustly\nref: https://github.com/nv-legate/cunumeric/pull/268/files#r846513290\r\n\r\nAs noted in the above conversation, the generic callable wrapping for adding coverage reporting is not sufficient. \r\n\r\nNumpy (and thus cunumeric) `ufunc` are objects with their own API (https://numpy.org/doc/stable/reference/ufuncs.html) and just using a plain function wrapper makes those methods invisible. Some requirements to decide first:\r\n\r\n* Do all the methods of a `ufunc` need to be included in coverage reporting? 
Or just its `__call__`\r\n\r\nIf yes, we will need to resort to a wrapping object (and then: is it sufficient to just create a purpose-built `ufunc_wrapper` or do we need a generic forwarding wrapper?)\r\n\r\nIf not, we may be able to just wrap and replace `__call__` using the function wrappers similar to the existing ones.\n", "before_files": [{"content": "# Copyright 2021-2022 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom __future__ import annotations\n\nimport warnings\nfrom functools import wraps\nfrom types import FunctionType, MethodDescriptorType, MethodType, ModuleType\nfrom typing import Any, Callable, Container, Optional, cast\n\nfrom typing_extensions import Protocol\n\nfrom .runtime import runtime\nfrom .utils import find_last_user_frames, find_last_user_stacklevel\n\n__all__ = (\"clone_module\",)\n\nFALLBACK_WARNING = (\n \"cuNumeric has not implemented {name} \"\n + \"and is falling back to canonical numpy. \"\n + \"You may notice significantly decreased performance \"\n + \"for this function call.\"\n)\n\nMOD_INTERNAL = {\"__dir__\", \"__getattr__\"}\n\nNDARRAY_INTERNAL = {\n \"__array_finalize__\",\n \"__array_function__\",\n \"__array_interface__\",\n \"__array_prepare__\",\n \"__array_priority__\",\n \"__array_struct__\",\n \"__array_ufunc__\",\n \"__array_wrap__\",\n}\n\n\ndef filter_namespace(\n ns: dict[str, Any],\n *,\n omit_names: Optional[Container[str]] = None,\n omit_types: tuple[type, ...] 
= (),\n) -> dict[str, Any]:\n omit_names = omit_names or set()\n return {\n attr: value\n for attr, value in ns.items()\n if attr not in omit_names and not isinstance(value, omit_types)\n }\n\n\nclass AnyCallable(Protocol):\n def __call__(self, *args: Any, **kwargs: Any) -> Any:\n ...\n\n\nclass CuWrapped(Protocol):\n _cunumeric_implemented: bool\n\n def __call__(self, *args: Any, **kwargs: Any) -> Any:\n ...\n\n\ndef implemented(\n func: AnyCallable, prefix: str, name: str, *, reporting: bool = True\n) -> CuWrapped:\n name = f\"{prefix}.{name}\"\n\n wrapper: CuWrapped\n\n if reporting:\n\n @wraps(func)\n def wrapper(*args: Any, **kwargs: Any) -> Any:\n location = find_last_user_frames(not runtime.report_dump_callstack)\n runtime.record_api_call(\n name=name, location=location, implemented=True\n )\n return func(*args, **kwargs)\n\n else:\n\n wrapper = cast(CuWrapped, func)\n\n wrapper._cunumeric_implemented = True\n\n return wrapper\n\n\ndef unimplemented(\n func: AnyCallable, prefix: str, name: str, *, reporting: bool = True\n) -> CuWrapped:\n name = f\"{prefix}.{name}\"\n\n wrapper: CuWrapped\n\n if reporting:\n\n @wraps(func)\n def wrapper(*args: Any, **kwargs: Any) -> Any:\n location = find_last_user_frames(not runtime.report_dump_callstack)\n runtime.record_api_call(\n name=name, location=location, implemented=False\n )\n return func(*args, **kwargs)\n\n else:\n\n @wraps(func)\n def wrapper(*args: Any, **kwargs: Any) -> Any:\n stacklevel = find_last_user_stacklevel()\n warnings.warn(\n FALLBACK_WARNING.format(name=name),\n stacklevel=stacklevel,\n category=RuntimeWarning,\n )\n return func(*args, **kwargs)\n\n wrapper._cunumeric_implemented = False\n\n return wrapper\n\n\ndef clone_module(\n origin_module: ModuleType, new_globals: dict[str, Any]\n) -> None:\n \"\"\"Copy attributes from one module to another, excluding submodules\n\n Function types are wrapped with a decorator to report API calls. All\n other values are copied as-is.\n\n Parameters\n ----------\n origin_module : ModuleTpe\n Existing module to clone attributes from\n\n new_globals : dict\n a globals() dict for the new module to clone into\n\n Returns\n -------\n None\n\n \"\"\"\n mod_name = origin_module.__name__\n\n missing = filter_namespace(\n origin_module.__dict__,\n omit_names=set(new_globals).union(MOD_INTERNAL),\n omit_types=(ModuleType,),\n )\n\n from numpy import ufunc as npufunc\n\n from ._ufunc.ufunc import ufunc as lgufunc\n\n reporting = runtime.report_coverage\n\n for attr, value in new_globals.items():\n if isinstance(value, (FunctionType, lgufunc)):\n wrapped = implemented(\n cast(AnyCallable, value), mod_name, attr, reporting=reporting\n )\n new_globals[attr] = wrapped\n\n for attr, value in missing.items():\n if isinstance(value, (FunctionType, npufunc)):\n wrapped = unimplemented(value, mod_name, attr, reporting=reporting)\n new_globals[attr] = wrapped\n else:\n new_globals[attr] = value\n\n\ndef clone_class(origin_class: type) -> Callable[[type], type]:\n \"\"\"Copy attributes from one class to another\n\n Method types are wrapped with a decorator to report API calls. 
All\n other values are copied as-is.\n\n Parameters\n ----------\n origin_class : type\n Existing class type to clone attributes from\n\n \"\"\"\n\n def should_wrap(obj: object) -> bool:\n return isinstance(\n obj, (FunctionType, MethodType, MethodDescriptorType)\n )\n\n def decorator(cls: type) -> type:\n class_name = f\"{origin_class.__module__}.{origin_class.__name__}\"\n\n missing = filter_namespace(\n origin_class.__dict__,\n # this simply omits ndarray internal methods for any class. If\n # we ever need to wrap more classes we may need to generalize to\n # per-class specification of internal names to skip\n omit_names=set(cls.__dict__).union(NDARRAY_INTERNAL),\n )\n\n reporting = runtime.report_coverage\n\n for attr, value in cls.__dict__.items():\n if should_wrap(value):\n wrapped = implemented(\n value, class_name, attr, reporting=reporting\n )\n setattr(cls, attr, wrapped)\n\n for attr, value in missing.items():\n if should_wrap(value):\n wrapped = unimplemented(\n value, class_name, attr, reporting=reporting\n )\n setattr(cls, attr, wrapped)\n else:\n setattr(cls, attr, value)\n\n return cls\n\n return decorator\n", "path": "cunumeric/coverage.py"}]} | 2,840 | 449 |
gh_patches_debug_6992 | rasdani/github-patches | git_diff | ansible__awx-13627 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to use CCP lookup plugin with empty webservice_id
### Please confirm the following
- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.
- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.
### Bug Summary
When a job uses the `CyberArk Central Credential Provider Lookup` credential plugin with an empty web service id, it fails with the following exception: 
```
Traceback (most recent call last):
File "/awx_devel/awx/main/tasks/jobs.py", line 508, in run
args = self.build_args(self.instance, private_data_dir, passwords)
File "/awx_devel/awx/main/tasks/jobs.py", line 941, in build_args
ssh_username = creds.get_input('username', default='')
File "/awx_devel/awx/main/models/credential/__init__.py", line 275, in get_input
return self._get_dynamic_input(field_name)
File "/awx_devel/awx/main/models/credential/__init__.py", line 309, in _get_dynamic_input
return input_source.get_input_value()
File "/awx_devel/awx/main/models/credential/__init__.py", line 1250, in get_input_value
return backend(**backend_kwargs)
File "/awx_devel/awx/main/credential_plugins/aim.py", line 73, in aim_backend
webservice_id = kwargs['webservice_id']
KeyError: 'webservice_id'
```
The issue is only reproducible if we create a CCP lookup credential using the API and do not provide the `webservice_id` key in the inputs. If you create the CCP lookup with the UI, everything works fine. 
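
A minimal standalone illustration of the failure mode (not AWX code; the dict below just stands in for the credential inputs):

```python
kwargs = {"url": "https://ccp.example.com", "app_id": "demo-app"}  # no 'webservice_id' key

webservice_id = kwargs.get("webservice_id", "")  # kwargs['webservice_id'] would raise KeyError here
if webservice_id == "":
    webservice_id = "AIMWebService"

print(webservice_id)  # AIMWebService
```
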
### AWX version
devel
### Select the relevant components
- [ ] UI
- [X] API
- [ ] Docs
- [ ] Collection
- [ ] CLI
- [ ] Other
### Installation method
docker development environment
### Modifications
no
### Ansible version
_No response_
### Operating system
_No response_
### Web browser
_No response_
### Steps to reproduce
1. Create a CyberArk Central Credential Provider Lookup credential. Do not provide the WebService ID value; keep it empty. I used the API to create the credential, and the webservice_id was missing in the inputs: 
```
inputs = {
'url': url,
'app_id': app_id,
'client_key': client_key,
'client_cert': client_cert,
'verify': verify
}
payload = factories.credential.payload(
name=fauxfactory.gen_utf8(),
description=fauxfactory.gen_utf8(),
credential_type=cred_type,
inputs=inputs
)
```
2. Create a Machine credential that uses the CCP lookup credential. Set a proper Object query. 
3. Create a Job Template that uses this credential. Run the job. 
### Expected results
The lookup should use the default webservice id: `AIMWebService`
### Actual results
Exception occurred. See description. 
### Additional information
_No response_
</issue>
<code>
[start of awx/main/credential_plugins/aim.py]
1 from .plugin import CredentialPlugin, CertFiles, raise_for_status
2
3 from urllib.parse import quote, urlencode, urljoin
4
5 from django.utils.translation import gettext_lazy as _
6 import requests
7
8 aim_inputs = {
9 'fields': [
10 {
11 'id': 'url',
12 'label': _('CyberArk CCP URL'),
13 'type': 'string',
14 'format': 'url',
15 },
16 {
17 'id': 'webservice_id',
18 'label': _('Web Service ID'),
19 'type': 'string',
20 'help_text': _('The CCP Web Service ID. Leave blank to default to AIMWebService.'),
21 },
22 {
23 'id': 'app_id',
24 'label': _('Application ID'),
25 'type': 'string',
26 'secret': True,
27 },
28 {
29 'id': 'client_key',
30 'label': _('Client Key'),
31 'type': 'string',
32 'secret': True,
33 'multiline': True,
34 },
35 {
36 'id': 'client_cert',
37 'label': _('Client Certificate'),
38 'type': 'string',
39 'secret': True,
40 'multiline': True,
41 },
42 {
43 'id': 'verify',
44 'label': _('Verify SSL Certificates'),
45 'type': 'boolean',
46 'default': True,
47 },
48 ],
49 'metadata': [
50 {
51 'id': 'object_query',
52 'label': _('Object Query'),
53 'type': 'string',
54 'help_text': _('Lookup query for the object. Ex: Safe=TestSafe;Object=testAccountName123'),
55 },
56 {'id': 'object_query_format', 'label': _('Object Query Format'), 'type': 'string', 'default': 'Exact', 'choices': ['Exact', 'Regexp']},
57 {
58 'id': 'reason',
59 'label': _('Reason'),
60 'type': 'string',
61 'help_text': _('Object request reason. This is only needed if it is required by the object\'s policy.'),
62 },
63 ],
64 'required': ['url', 'app_id', 'object_query'],
65 }
66
67
68 def aim_backend(**kwargs):
69 url = kwargs['url']
70 client_cert = kwargs.get('client_cert', None)
71 client_key = kwargs.get('client_key', None)
72 verify = kwargs['verify']
73 webservice_id = kwargs['webservice_id']
74 app_id = kwargs['app_id']
75 object_query = kwargs['object_query']
76 object_query_format = kwargs['object_query_format']
77 reason = kwargs.get('reason', None)
78 if webservice_id == '':
79 webservice_id = 'AIMWebService'
80
81 query_params = {
82 'AppId': app_id,
83 'Query': object_query,
84 'QueryFormat': object_query_format,
85 }
86 if reason:
87 query_params['reason'] = reason
88
89 request_qs = '?' + urlencode(query_params, quote_via=quote)
90 request_url = urljoin(url, '/'.join([webservice_id, 'api', 'Accounts']))
91
92 with CertFiles(client_cert, client_key) as cert:
93 res = requests.get(
94 request_url + request_qs,
95 timeout=30,
96 cert=cert,
97 verify=verify,
98 allow_redirects=False,
99 )
100 raise_for_status(res)
101 return res.json()['Content']
102
103
104 aim_plugin = CredentialPlugin('CyberArk Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend)
105
[end of awx/main/credential_plugins/aim.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/awx/main/credential_plugins/aim.py b/awx/main/credential_plugins/aim.py
--- a/awx/main/credential_plugins/aim.py
+++ b/awx/main/credential_plugins/aim.py
@@ -70,7 +70,7 @@
client_cert = kwargs.get('client_cert', None)
client_key = kwargs.get('client_key', None)
verify = kwargs['verify']
- webservice_id = kwargs['webservice_id']
+ webservice_id = kwargs.get('webservice_id', '')
app_id = kwargs['app_id']
object_query = kwargs['object_query']
object_query_format = kwargs['object_query_format']
| {"golden_diff": "diff --git a/awx/main/credential_plugins/aim.py b/awx/main/credential_plugins/aim.py\n--- a/awx/main/credential_plugins/aim.py\n+++ b/awx/main/credential_plugins/aim.py\n@@ -70,7 +70,7 @@\n client_cert = kwargs.get('client_cert', None)\n client_key = kwargs.get('client_key', None)\n verify = kwargs['verify']\n- webservice_id = kwargs['webservice_id']\n+ webservice_id = kwargs.get('webservice_id', '')\n app_id = kwargs['app_id']\n object_query = kwargs['object_query']\n object_query_format = kwargs['object_query_format']\n", "issue": "Unable to use CCP lookup plugin with empty webservice_id\n### Please confirm the following\r\n\r\n- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).\r\n- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.\r\n- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.\r\n\r\n### Bug Summary\r\n\r\nWhen job uses the `CyberArk Central Credential Provider Lookup` credential plugin with an empty web service id, it fails with the exception: \r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/awx_devel/awx/main/tasks/jobs.py\", line 508, in run\r\n args = self.build_args(self.instance, private_data_dir, passwords)\r\n File \"/awx_devel/awx/main/tasks/jobs.py\", line 941, in build_args\r\n ssh_username = creds.get_input('username', default='')\r\n File \"/awx_devel/awx/main/models/credential/__init__.py\", line 275, in get_input\r\n return self._get_dynamic_input(field_name)\r\n File \"/awx_devel/awx/main/models/credential/__init__.py\", line 309, in _get_dynamic_input\r\n return input_source.get_input_value()\r\n File \"/awx_devel/awx/main/models/credential/__init__.py\", line 1250, in get_input_value\r\n return backend(**backend_kwargs)\r\n File \"/awx_devel/awx/main/credential_plugins/aim.py\", line 73, in aim_backend\r\n webservice_id = kwargs['webservice_id']\r\nKeyError: 'webservice_id'\r\n```\r\n\r\nThe issue is only reproducible if we create a CCP lookup credential using API and we do not provide the `webservice_id` key as the input. If you create CCP lookup with UI - everything works fine. \r\n\r\n### AWX version\r\n\r\ndevel\r\n\r\n### Select the relevant components\r\n\r\n- [ ] UI\r\n- [X] API\r\n- [ ] Docs\r\n- [ ] Collection\r\n- [ ] CLI\r\n- [ ] Other\r\n\r\n### Installation method\r\n\r\ndocker development environment\r\n\r\n### Modifications\r\n\r\nno\r\n\r\n### Ansible version\r\n\r\n_No response_\r\n\r\n### Operating system\r\n\r\n_No response_\r\n\r\n### Web browser\r\n\r\n_No response_\r\n\r\n### Steps to reproduce\r\n\r\n1. Create CyberArk Central Credential Provider Lookup credential. Do not provide the WebService ID value, keep it empty. I used API to create credetnail and the webservice_id was missing in the inputs: \r\n\r\n```\r\ninputs = {\r\n 'url': url,\r\n 'app_id': app_id,\r\n 'client_key': client_key,\r\n 'client_cert': client_cert,\r\n 'verify': verify\r\n}\r\n\r\npayload = factories.credential.payload(\r\n name=fauxfactory.gen_utf8(),\r\n description=fauxfactory.gen_utf8(),\r\n credential_type=cred_type,\r\n inputs=inputs\r\n)\r\n```\r\n\r\n2. Create Machine credential that uses the CCP lookup credential. Set proper Object query. \r\n3. Create Job Template that uses this credential. Run the job. 
\r\n\r\n\r\n\r\n### Expected results\r\n\r\nThe lookup should use default webservice id: `AIMWebService`\r\n\r\n### Actual results\r\n\r\nException occured. See description. \r\n\r\n\r\n\r\n### Additional information\r\n\r\n_No response_\n", "before_files": [{"content": "from .plugin import CredentialPlugin, CertFiles, raise_for_status\n\nfrom urllib.parse import quote, urlencode, urljoin\n\nfrom django.utils.translation import gettext_lazy as _\nimport requests\n\naim_inputs = {\n 'fields': [\n {\n 'id': 'url',\n 'label': _('CyberArk CCP URL'),\n 'type': 'string',\n 'format': 'url',\n },\n {\n 'id': 'webservice_id',\n 'label': _('Web Service ID'),\n 'type': 'string',\n 'help_text': _('The CCP Web Service ID. Leave blank to default to AIMWebService.'),\n },\n {\n 'id': 'app_id',\n 'label': _('Application ID'),\n 'type': 'string',\n 'secret': True,\n },\n {\n 'id': 'client_key',\n 'label': _('Client Key'),\n 'type': 'string',\n 'secret': True,\n 'multiline': True,\n },\n {\n 'id': 'client_cert',\n 'label': _('Client Certificate'),\n 'type': 'string',\n 'secret': True,\n 'multiline': True,\n },\n {\n 'id': 'verify',\n 'label': _('Verify SSL Certificates'),\n 'type': 'boolean',\n 'default': True,\n },\n ],\n 'metadata': [\n {\n 'id': 'object_query',\n 'label': _('Object Query'),\n 'type': 'string',\n 'help_text': _('Lookup query for the object. Ex: Safe=TestSafe;Object=testAccountName123'),\n },\n {'id': 'object_query_format', 'label': _('Object Query Format'), 'type': 'string', 'default': 'Exact', 'choices': ['Exact', 'Regexp']},\n {\n 'id': 'reason',\n 'label': _('Reason'),\n 'type': 'string',\n 'help_text': _('Object request reason. This is only needed if it is required by the object\\'s policy.'),\n },\n ],\n 'required': ['url', 'app_id', 'object_query'],\n}\n\n\ndef aim_backend(**kwargs):\n url = kwargs['url']\n client_cert = kwargs.get('client_cert', None)\n client_key = kwargs.get('client_key', None)\n verify = kwargs['verify']\n webservice_id = kwargs['webservice_id']\n app_id = kwargs['app_id']\n object_query = kwargs['object_query']\n object_query_format = kwargs['object_query_format']\n reason = kwargs.get('reason', None)\n if webservice_id == '':\n webservice_id = 'AIMWebService'\n\n query_params = {\n 'AppId': app_id,\n 'Query': object_query,\n 'QueryFormat': object_query_format,\n }\n if reason:\n query_params['reason'] = reason\n\n request_qs = '?' + urlencode(query_params, quote_via=quote)\n request_url = urljoin(url, '/'.join([webservice_id, 'api', 'Accounts']))\n\n with CertFiles(client_cert, client_key) as cert:\n res = requests.get(\n request_url + request_qs,\n timeout=30,\n cert=cert,\n verify=verify,\n allow_redirects=False,\n )\n raise_for_status(res)\n return res.json()['Content']\n\n\naim_plugin = CredentialPlugin('CyberArk Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend)\n", "path": "awx/main/credential_plugins/aim.py"}]} | 2,201 | 150 |
gh_patches_debug_7696 | rasdani/github-patches | git_diff | borgbackup__borg-6129 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
it's 2022
check misc. places in source, docs, readme, copyright, license, ... and update to 2022.
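
A throwaway scan along these lines (purely illustrative, not part of borg) can help list the candidate spots:

```python
import pathlib
import re

# look for copyright-style lines that still mention 2021
for pattern in ("*.py", "*.rst", "LICENSE", "AUTHORS"):
    for path in pathlib.Path(".").rglob(pattern):
        text = path.read_text(errors="ignore")
        for lineno, line in enumerate(text.splitlines(), 1):
            if re.search(r"\b2021\b", line) and "copyright" in line.lower():
                print(f"{path}:{lineno}: {line.strip()}")
```
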
</issue>
<code>
[start of docs/conf.py]
1 # documentation build configuration file, created by
2 # sphinx-quickstart on Sat Sep 10 18:18:25 2011.
3 #
4 # This file is execfile()d with the current directory set to its containing dir.
5 #
6 # Note that not all possible configuration values are present in this
7 # autogenerated file.
8 #
9 # All configuration values have a default; values that are commented out
10 # serve to show the default.
11
12 # If extensions (or modules to document with autodoc) are in another directory,
13 # add these directories to sys.path here. If the directory is relative to the
14 # documentation root, use os.path.abspath to make it absolute, like shown here.
15 import sys, os
16 sys.path.insert(0, os.path.abspath('../src'))
17
18 from borg import __version__ as sw_version
19
20 # -- General configuration -----------------------------------------------------
21
22 # If your documentation needs a minimal Sphinx version, state it here.
23 #needs_sphinx = '1.0'
24
25 # Add any Sphinx extension module names here, as strings. They can be extensions
26 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
27 extensions = []
28
29 # Add any paths that contain templates here, relative to this directory.
30 templates_path = ['_templates']
31
32 # The suffix of source filenames.
33 source_suffix = '.rst'
34
35 # The encoding of source files.
36 #source_encoding = 'utf-8-sig'
37
38 # The master toctree document.
39 master_doc = 'index'
40
41 # General information about the project.
42 project = 'Borg - Deduplicating Archiver'
43 copyright = u'2010-2014 Jonas Borgström, 2015-2021 The Borg Collective (see AUTHORS file)'
44
45 # The version info for the project you're documenting, acts as replacement for
46 # |version| and |release|, also used in various other places throughout the
47 # built documents.
48 #
49 # The short X.Y version.
50 split_char = '+' if '+' in sw_version else '-'
51 version = sw_version.split(split_char)[0]
52 # The full version, including alpha/beta/rc tags.
53 release = version
54
55 suppress_warnings = ['image.nonlocal_uri']
56
57 # The language for content autogenerated by Sphinx. Refer to documentation
58 # for a list of supported languages.
59 #language = None
60
61 # There are two options for replacing |today|: either, you set today to some
62 # non-false value, then it is used:
63 #today = ''
64 # Else, today_fmt is used as the format for a strftime call.
65 today_fmt = '%Y-%m-%d'
66
67 # List of patterns, relative to source directory, that match files and
68 # directories to ignore when looking for source files.
69 exclude_patterns = ['_build']
70
71 # The reST default role (used for this markup: `text`) to use for all documents.
72 #default_role = None
73
74 # The Borg docs contain no or very little Python docs.
75 # Thus, the primary domain is rst.
76 primary_domain = 'rst'
77
78 # If true, '()' will be appended to :func: etc. cross-reference text.
79 #add_function_parentheses = True
80
81 # If true, the current module name will be prepended to all description
82 # unit titles (such as .. function::).
83 #add_module_names = True
84
85 # If true, sectionauthor and moduleauthor directives will be shown in the
86 # output. They are ignored by default.
87 #show_authors = False
88
89 # The name of the Pygments (syntax highlighting) style to use.
90 pygments_style = 'sphinx'
91
92 # A list of ignored prefixes for module index sorting.
93 #modindex_common_prefix = []
94
95
96 # -- Options for HTML output ---------------------------------------------------
97
98 # The theme to use for HTML and HTML Help pages. See the documentation for
99 # a list of builtin themes.
100 import guzzle_sphinx_theme
101
102 html_theme_path = guzzle_sphinx_theme.html_theme_path()
103 html_theme = 'guzzle_sphinx_theme'
104
105
106 def set_rst_settings(app):
107 app.env.settings.update({
108 'field_name_limit': 0,
109 'option_limit': 0,
110 })
111
112
113 def setup(app):
114 app.add_css_file('css/borg.css')
115 app.connect('builder-inited', set_rst_settings)
116
117 # Theme options are theme-specific and customize the look and feel of a theme
118 # further. For a list of options available for each theme, see the
119 # documentation.
120 html_theme_options = {
121 'project_nav_name': 'Borg %s' % version,
122 }
123
124 # Add any paths that contain custom themes here, relative to this directory.
125 #html_theme_path = ['_themes']
126
127 # The name for this set of Sphinx documents. If None, it defaults to
128 # "<project> v<release> documentation".
129 #html_title = None
130
131 # A shorter title for the navigation bar. Default is the same as html_title.
132 #html_short_title = None
133
134 # The name of an image file (relative to this directory) to place at the top
135 # of the sidebar.
136 html_logo = '_static/logo.svg'
137
138 # The name of an image file (within the static path) to use as favicon of the
139 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
140 # pixels large.
141 html_favicon = '_static/favicon.ico'
142
143 # Add any paths that contain custom static files (such as style sheets) here,
144 # relative to this directory. They are copied after the builtin static files,
145 # so a file named "default.css" will overwrite the builtin "default.css".
146 html_static_path = ['borg_theme']
147
148 html_extra_path = ['../src/borg/paperkey.html']
149
150 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
151 # using the given strftime format.
152 html_last_updated_fmt = '%Y-%m-%d'
153
154 # If true, SmartyPants will be used to convert quotes and dashes to
155 # typographically correct entities.
156 html_use_smartypants = True
157
158 # Custom sidebar templates, maps document names to template names.
159 html_sidebars = {
160 '**': ['logo-text.html', 'searchbox.html', 'globaltoc.html'],
161 }
162
163 # Additional templates that should be rendered to pages, maps page names to
164 # template names.
165 #html_additional_pages = {}
166
167 # If false, no module index is generated.
168 #html_domain_indices = True
169
170 # If false, no index is generated.
171 html_use_index = False
172
173 # If true, the index is split into individual pages for each letter.
174 #html_split_index = False
175
176 # If true, links to the reST sources are added to the pages.
177 html_show_sourcelink = False
178
179 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
180 html_show_sphinx = False
181
182 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
183 html_show_copyright = False
184
185 # If true, an OpenSearch description file will be output, and all pages will
186 # contain a <link> tag referring to it. The value of this option must be the
187 # base URL from which the finished HTML is served.
188 #html_use_opensearch = ''
189
190 # This is the file name suffix for HTML files (e.g. ".xhtml").
191 #html_file_suffix = None
192
193 # Output file base name for HTML help builder.
194 htmlhelp_basename = 'borgdoc'
195
196
197 # -- Options for LaTeX output --------------------------------------------------
198
199 # Grouping the document tree into LaTeX files. List of tuples
200 # (source start file, target name, title, author, documentclass [howto/manual]).
201 latex_documents = [
202 ('book', 'Borg.tex', 'Borg Documentation',
203 'The Borg Collective', 'manual'),
204 ]
205
206 # The name of an image file (relative to this directory) to place at the top of
207 # the title page.
208 latex_logo = '_static/logo.pdf'
209
210 latex_elements = {
211 'papersize': 'a4paper',
212 'pointsize': '10pt',
213 'figure_align': 'H',
214 }
215
216 # For "manual" documents, if this is true, then toplevel headings are parts,
217 # not chapters.
218 #latex_use_parts = False
219
220 # If true, show page references after internal links.
221 #latex_show_pagerefs = False
222
223 # If true, show URL addresses after external links.
224 latex_show_urls = 'footnote'
225
226 # Additional stuff for the LaTeX preamble.
227 #latex_preamble = ''
228
229 # Documents to append as an appendix to all manuals.
230 latex_appendices = [
231 'support',
232 'resources',
233 'changes',
234 'authors',
235 ]
236
237 # If false, no module index is generated.
238 #latex_domain_indices = True
239
240
241 # -- Options for manual page output --------------------------------------------
242
243 # One entry per manual page. List of tuples
244 # (source start file, name, description, authors, manual section).
245 man_pages = [
246 ('usage', 'borg',
247 'BorgBackup is a deduplicating backup program with optional compression and authenticated encryption.',
248 ['The Borg Collective (see AUTHORS file)'],
249 1),
250 ]
251
252 extensions = [
253 'sphinx.ext.extlinks',
254 'sphinx.ext.autodoc',
255 'sphinx.ext.todo',
256 'sphinx.ext.coverage',
257 'sphinx.ext.viewcode',
258 ]
259
260 extlinks = {
261 'issue': ('https://github.com/borgbackup/borg/issues/%s', '#'),
262 'targz_url': ('https://pypi.python.org/packages/source/b/borgbackup/%%s-%s.tar.gz' % version, None),
263 }
264
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -40,7 +40,7 @@
# General information about the project.
project = 'Borg - Deduplicating Archiver'
-copyright = u'2010-2014 Jonas Borgström, 2015-2021 The Borg Collective (see AUTHORS file)'
+copyright = u'2010-2014 Jonas Borgström, 2015-2022 The Borg Collective (see AUTHORS file)'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -40,7 +40,7 @@\n \n # General information about the project.\n project = 'Borg - Deduplicating Archiver'\n-copyright = u'2010-2014 Jonas Borgstr\u00f6m, 2015-2021 The Borg Collective (see AUTHORS file)'\n+copyright = u'2010-2014 Jonas Borgstr\u00f6m, 2015-2022 The Borg Collective (see AUTHORS file)'\n \n # The version info for the project you're documenting, acts as replacement for\n # |version| and |release|, also used in various other places throughout the\n", "issue": "it's 2022\ncheck misc. places in source, docs, readme, copyright, license, ... and update to 2022.\n", "before_files": [{"content": "# documentation build configuration file, created by\n# sphinx-quickstart on Sat Sep 10 18:18:25 2011.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nimport sys, os\nsys.path.insert(0, os.path.abspath('../src'))\n\nfrom borg import __version__ as sw_version\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = []\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Borg - Deduplicating Archiver'\ncopyright = u'2010-2014 Jonas Borgstr\u00f6m, 2015-2021 The Borg Collective (see AUTHORS file)'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nsplit_char = '+' if '+' in sw_version else '-'\nversion = sw_version.split(split_char)[0]\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\nsuppress_warnings = ['image.nonlocal_uri']\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\ntoday_fmt = '%Y-%m-%d'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# The Borg docs contain no or very little Python docs.\n# Thus, the primary domain is rst.\nprimary_domain = 'rst'\n\n# If true, '()' will be appended to :func: etc. 
cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nimport guzzle_sphinx_theme\n\nhtml_theme_path = guzzle_sphinx_theme.html_theme_path()\nhtml_theme = 'guzzle_sphinx_theme'\n\n\ndef set_rst_settings(app):\n app.env.settings.update({\n 'field_name_limit': 0,\n 'option_limit': 0,\n })\n\n\ndef setup(app):\n app.add_css_file('css/borg.css')\n app.connect('builder-inited', set_rst_settings)\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n 'project_nav_name': 'Borg %s' % version,\n}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = ['_themes']\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = '_static/logo.svg'\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\nhtml_favicon = '_static/favicon.ico'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['borg_theme']\n\nhtml_extra_path = ['../src/borg/paperkey.html']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%Y-%m-%d'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\nhtml_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\nhtml_sidebars = {\n '**': ['logo-text.html', 'searchbox.html', 'globaltoc.html'],\n}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\nhtml_use_index = False\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\nhtml_show_sourcelink = False\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\nhtml_show_sphinx = False\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\nhtml_show_copyright = False\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. 
The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'borgdoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('book', 'Borg.tex', 'Borg Documentation',\n 'The Borg Collective', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\nlatex_logo = '_static/logo.pdf'\n\nlatex_elements = {\n 'papersize': 'a4paper',\n 'pointsize': '10pt',\n 'figure_align': 'H',\n}\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\nlatex_show_urls = 'footnote'\n\n# Additional stuff for the LaTeX preamble.\n#latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\nlatex_appendices = [\n 'support',\n 'resources',\n 'changes',\n 'authors',\n]\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('usage', 'borg',\n 'BorgBackup is a deduplicating backup program with optional compression and authenticated encryption.',\n ['The Borg Collective (see AUTHORS file)'],\n 1),\n]\n\nextensions = [\n 'sphinx.ext.extlinks',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.todo',\n 'sphinx.ext.coverage',\n 'sphinx.ext.viewcode',\n]\n\nextlinks = {\n 'issue': ('https://github.com/borgbackup/borg/issues/%s', '#'),\n 'targz_url': ('https://pypi.python.org/packages/source/b/borgbackup/%%s-%s.tar.gz' % version, None),\n}\n", "path": "docs/conf.py"}]} | 3,319 | 165 |
gh_patches_debug_2021 | rasdani/github-patches | git_diff | zigpy__zha-device-handlers-112 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ikea group support bind method doesn't return status as expected
https://github.com/dmulcahey/zha-device-handlers/blob/b5b383939944ff541ee38a94c7f4d6cf3edc611f/zhaquirks/ikea/__init__.py#L25
https://github.com/home-assistant/home-assistant/blob/a30c37017b7782473294d7999e85d7a369a0539a/homeassistant/components/zha/core/helpers.py#L56
Reported by @Adminiuga.
We should return the status wrapped in a list (i.e. `[status]`) so the bind helper in HA is happy.
</issue>
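For illustration, a minimal self-contained sketch of the behaviour the issue asks for: await the group-add call and hand the status back wrapped in a list so a caller can index `result[0]`. The fake coordinator and the `0` success value are illustrative assumptions, not the zigpy API; the real cluster code follows below.

```python
import asyncio


class FakeCoordinator:
    """Stand-in for the Zigbee coordinator device (assumption for this sketch)."""

    async def add_to_group(self, group_id):
        return 0  # pretend success; zigpy would return a real status object here


async def bind(coordinator, group_id):
    # Wrap the awaited status in a list so callers can index result[0],
    # which is what the Home Assistant bind helper expects per the issue.
    status = await coordinator.add_to_group(group_id)
    return [status]


print(asyncio.run(bind(FakeCoordinator(), 0x1234)))  # -> [0]
```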
<code>
[start of zhaquirks/ikea/__init__.py]
1 """Ikea module."""
2 import logging
3 from zigpy.zcl.clusters.lightlink import LightLink
4 from zigpy.quirks import CustomCluster
5
6 _LOGGER = logging.getLogger(__name__)
7
8
9 class LightLinkCluster(CustomCluster, LightLink):
10 """Ikea LightLink cluster."""
11
12 async def bind(self):
13 """Bind LightLink cluster to coordinator."""
14 application = self._endpoint.device.application
15 try:
16 coordinator = application.get_device(application.ieee)
17 except KeyError:
18 _LOGGER.warning(
19 "Aborting - unable to locate required coordinator device."
20 )
21 return
22 group_list = await self.get_group_identifiers(0)
23 group_record = group_list[2]
24 group_id = group_record[0].group_id
25 await coordinator.add_to_group(group_id)
26
[end of zhaquirks/ikea/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zhaquirks/ikea/__init__.py b/zhaquirks/ikea/__init__.py
--- a/zhaquirks/ikea/__init__.py
+++ b/zhaquirks/ikea/__init__.py
@@ -22,4 +22,5 @@
group_list = await self.get_group_identifiers(0)
group_record = group_list[2]
group_id = group_record[0].group_id
- await coordinator.add_to_group(group_id)
+ status = await coordinator.add_to_group(group_id)
+ return [status]
| {"golden_diff": "diff --git a/zhaquirks/ikea/__init__.py b/zhaquirks/ikea/__init__.py\n--- a/zhaquirks/ikea/__init__.py\n+++ b/zhaquirks/ikea/__init__.py\n@@ -22,4 +22,5 @@\n group_list = await self.get_group_identifiers(0)\n group_record = group_list[2]\n group_id = group_record[0].group_id\n- await coordinator.add_to_group(group_id)\n+ status = await coordinator.add_to_group(group_id)\n+ return [status]\n", "issue": "Ikea group support bind method doesn't return status as expected\nhttps://github.com/dmulcahey/zha-device-handlers/blob/b5b383939944ff541ee38a94c7f4d6cf3edc611f/zhaquirks/ikea/__init__.py#L25\r\n\r\nhttps://github.com/home-assistant/home-assistant/blob/a30c37017b7782473294d7999e85d7a369a0539a/homeassistant/components/zha/core/helpers.py#L56\r\n\r\nreported by @Adminiuga \r\n\r\nwe should return the status in [ ] so the bind helper in HA is happy.\n", "before_files": [{"content": "\"\"\"Ikea module.\"\"\"\nimport logging\nfrom zigpy.zcl.clusters.lightlink import LightLink\nfrom zigpy.quirks import CustomCluster\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass LightLinkCluster(CustomCluster, LightLink):\n \"\"\"Ikea LightLink cluster.\"\"\"\n\n async def bind(self):\n \"\"\"Bind LightLink cluster to coordinator.\"\"\"\n application = self._endpoint.device.application\n try:\n coordinator = application.get_device(application.ieee)\n except KeyError:\n _LOGGER.warning(\n \"Aborting - unable to locate required coordinator device.\"\n )\n return\n group_list = await self.get_group_identifiers(0)\n group_record = group_list[2]\n group_id = group_record[0].group_id\n await coordinator.add_to_group(group_id)\n", "path": "zhaquirks/ikea/__init__.py"}]} | 925 | 130 |
gh_patches_debug_11706 | rasdani/github-patches | git_diff | pypa__pip-11318 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Logging error when checking for new version of pip.
### Description
When pip (22.1.2) checked for a new version, it failed with an error raised from the following function:
https://github.com/pypa/pip/blob/c4606b3572529625762f0586dda134302cf6122c/src/pip/_internal/utils/entrypoints.py#L46-L62
The problematic call is to `os.path.samefile` on line 58, where it compares the output of `shutil.which('pip')` to `<sys.prefix>/bin/pip` (in my case `/usr/bin/pip`). However, on my system, `pip` is installed to the user site-packages directory (so the binary is at `/home/domdf/.local/bin/pip`).
The solution is to check whether the file exists before calling `samefile`.
I have Python 3.7 and 3.9 installed to `/usr` alongside the system's Python 3.8, and the error is present with all three versions.
### Expected behavior
Pip checks for a new version without an error.
### pip version
22.1.2
### Python version
3.9.13
### OS
Ubuntu 20.04
### How to Reproduce
1. `pip install pip==22.1.2`
2. `pip install pip` <- Any package will do.
### Output
```shell
$ pip install pip
Defaulting to user installation because normal site-packages is not writeable
Requirement already satisfied: pip in ./.local/lib/python3.9/site-packages (22.1.2)
--- Logging error ---
Traceback (most recent call last):
File "/home/domdf/.local/lib/python3.9/site-packages/pip/_internal/utils/logging.py", line 177, in emit
self.console.print(renderable, overflow="ignore", crop=False, style=style)
File "/home/domdf/.local/lib/python3.9/site-packages/pip/_vendor/rich/console.py", line 1752, in print
extend(render(renderable, render_options))
File "/home/domdf/.local/lib/python3.9/site-packages/pip/_vendor/rich/console.py", line 1390, in render
for render_output in iter_render:
File "/home/domdf/.local/lib/python3.9/site-packages/pip/_internal/utils/logging.py", line 134, in __rich_console__
for line in lines:
File "/home/domdf/.local/lib/python3.9/site-packages/pip/_vendor/rich/segment.py", line 245, in split_lines
for segment in segments:
File "/home/domdf/.local/lib/python3.9/site-packages/pip/_vendor/rich/console.py", line 1368, in render
renderable = rich_cast(renderable)
File "/home/domdf/.local/lib/python3.9/site-packages/pip/_vendor/rich/protocol.py", line 36, in rich_cast
renderable = cast_method()
File "/home/domdf/.local/lib/python3.9/site-packages/pip/_internal/self_outdated_check.py", line 130, in __rich__
pip_cmd = get_best_invocation_for_this_pip()
File "/home/domdf/.local/lib/python3.9/site-packages/pip/_internal/utils/entrypoints.py", line 58, in get_best_invocation_for_this_pip
if found_executable and os.path.samefile(
File "/usr/lib/python3.9/genericpath.py", line 101, in samefile
s2 = os.stat(f2)
FileNotFoundError: [Errno 2] No such file or directory: '/usr/bin/pip'
Call stack:
File "/usr/lib/python3.9/runpy.py", line 197, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/usr/lib/python3.9/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/home/domdf/.local/lib/python3.9/site-packages/pip/__main__.py", line 31, in <module>
sys.exit(_main())
File "/home/domdf/.local/lib/python3.9/site-packages/pip/_internal/cli/main.py", line 70, in main
return command.main(cmd_args)
File "/home/domdf/.local/lib/python3.9/site-packages/pip/_internal/cli/base_command.py", line 101, in main
return self._main(args)
File "/home/domdf/.local/lib/python3.9/site-packages/pip/_internal/cli/base_command.py", line 223, in _main
self.handle_pip_version_check(options)
File "/home/domdf/.local/lib/python3.9/site-packages/pip/_internal/cli/req_command.py", line 148, in handle_pip_version_check
pip_self_version_check(session, options)
File "/home/domdf/.local/lib/python3.9/site-packages/pip/_internal/self_outdated_check.py", line 237, in pip_self_version_check
logger.info("[present-rich] %s", upgrade_prompt)
File "/usr/lib/python3.9/logging/__init__.py", line 1446, in info
self._log(INFO, msg, args, **kwargs)
File "/usr/lib/python3.9/logging/__init__.py", line 1589, in _log
self.handle(record)
File "/usr/lib/python3.9/logging/__init__.py", line 1599, in handle
self.callHandlers(record)
File "/usr/lib/python3.9/logging/__init__.py", line 1661, in callHandlers
hdlr.handle(record)
File "/usr/lib/python3.9/logging/__init__.py", line 952, in handle
self.emit(record)
File "/home/domdf/.local/lib/python3.9/site-packages/pip/_internal/utils/logging.py", line 179, in emit
self.handleError(record)
Message: '[present-rich] %s'
Arguments: (UpgradePrompt(old='22.1.2', new='22.2'),)
```
### Code of Conduct
- [X] I agree to follow the [PSF Code of Conduct](https://www.python.org/psf/conduct/).
</issue>
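A small, self-contained sketch of the guard the reporter suggests: check that both paths exist before handing them to `os.path.samefile`, which raises `FileNotFoundError` for a missing file. The helper name and example paths are illustrative assumptions; the actual function being fixed appears in the code below.

```python
import os


def is_same_existing_file(candidate: str, expected: str) -> bool:
    """Return True only when both paths exist and refer to the same file."""
    return (
        os.path.exists(candidate)
        and os.path.exists(expected)
        and os.path.samefile(candidate, expected)
    )


# A missing path no longer raises FileNotFoundError; it simply compares unequal.
print(is_same_existing_file("/usr/bin/python3", "/usr/bin/pip"))
```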
<code>
[start of src/pip/_internal/utils/entrypoints.py]
1 import itertools
2 import os
3 import shutil
4 import sys
5 from typing import List, Optional
6
7 from pip._internal.cli.main import main
8 from pip._internal.utils.compat import WINDOWS
9
10 _EXECUTABLE_NAMES = [
11 "pip",
12 f"pip{sys.version_info.major}",
13 f"pip{sys.version_info.major}.{sys.version_info.minor}",
14 ]
15 if WINDOWS:
16 _allowed_extensions = {"", ".exe"}
17 _EXECUTABLE_NAMES = [
18 "".join(parts)
19 for parts in itertools.product(_EXECUTABLE_NAMES, _allowed_extensions)
20 ]
21
22
23 def _wrapper(args: Optional[List[str]] = None) -> int:
24 """Central wrapper for all old entrypoints.
25
26 Historically pip has had several entrypoints defined. Because of issues
27 arising from PATH, sys.path, multiple Pythons, their interactions, and most
28 of them having a pip installed, users suffer every time an entrypoint gets
29 moved.
30
31 To alleviate this pain, and provide a mechanism for warning users and
32 directing them to an appropriate place for help, we now define all of
33 our old entrypoints as wrappers for the current one.
34 """
35 sys.stderr.write(
36 "WARNING: pip is being invoked by an old script wrapper. This will "
37 "fail in a future version of pip.\n"
38 "Please see https://github.com/pypa/pip/issues/5599 for advice on "
39 "fixing the underlying issue.\n"
40 "To avoid this problem you can invoke Python with '-m pip' instead of "
41 "running pip directly.\n"
42 )
43 return main(args)
44
45
46 def get_best_invocation_for_this_pip() -> str:
47 """Try to figure out the best way to invoke pip in the current environment."""
48 binary_directory = "Scripts" if WINDOWS else "bin"
49 binary_prefix = os.path.join(sys.prefix, binary_directory)
50
51 # Try to use pip[X[.Y]] names, if those executables for this environment are
52 # the first on PATH with that name.
53 path_parts = os.path.normcase(os.environ.get("PATH", "")).split(os.pathsep)
54 exe_are_in_PATH = os.path.normcase(binary_prefix) in path_parts
55 if exe_are_in_PATH:
56 for exe_name in _EXECUTABLE_NAMES:
57 found_executable = shutil.which(exe_name)
58 if found_executable and os.path.samefile(
59 found_executable,
60 os.path.join(binary_prefix, exe_name),
61 ):
62 return exe_name
63
64 # Use the `-m` invocation, if there's no "nice" invocation.
65 return f"{get_best_invocation_for_this_python()} -m pip"
66
67
68 def get_best_invocation_for_this_python() -> str:
69 """Try to figure out the best way to invoke the current Python."""
70 exe = sys.executable
71 exe_name = os.path.basename(exe)
72
73 # Try to use the basename, if it's the first executable.
74 found_executable = shutil.which(exe_name)
75 if found_executable and os.path.samefile(found_executable, exe):
76 return exe_name
77
78 # Use the full executable name, because we couldn't find something simpler.
79 return exe
80
[end of src/pip/_internal/utils/entrypoints.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pip/_internal/utils/entrypoints.py b/src/pip/_internal/utils/entrypoints.py
--- a/src/pip/_internal/utils/entrypoints.py
+++ b/src/pip/_internal/utils/entrypoints.py
@@ -55,9 +55,14 @@
if exe_are_in_PATH:
for exe_name in _EXECUTABLE_NAMES:
found_executable = shutil.which(exe_name)
- if found_executable and os.path.samefile(
- found_executable,
- os.path.join(binary_prefix, exe_name),
+ binary_executable = os.path.join(binary_prefix, exe_name)
+ if (
+ found_executable
+ and os.path.exists(binary_executable)
+ and os.path.samefile(
+ found_executable,
+ binary_executable,
+ )
):
return exe_name
| {"golden_diff": "diff --git a/src/pip/_internal/utils/entrypoints.py b/src/pip/_internal/utils/entrypoints.py\n--- a/src/pip/_internal/utils/entrypoints.py\n+++ b/src/pip/_internal/utils/entrypoints.py\n@@ -55,9 +55,14 @@\n if exe_are_in_PATH:\n for exe_name in _EXECUTABLE_NAMES:\n found_executable = shutil.which(exe_name)\n- if found_executable and os.path.samefile(\n- found_executable,\n- os.path.join(binary_prefix, exe_name),\n+ binary_executable = os.path.join(binary_prefix, exe_name)\n+ if (\n+ found_executable\n+ and os.path.exists(binary_executable)\n+ and os.path.samefile(\n+ found_executable,\n+ binary_executable,\n+ )\n ):\n return exe_name\n", "issue": "Logging error when checking for new version of pip.\n### Description\n\nWhen pip (22.1.2) checked for a new version it failed with an error. It's coming from the following function:\r\n\r\nhttps://github.com/pypa/pip/blob/c4606b3572529625762f0586dda134302cf6122c/src/pip/_internal/utils/entrypoints.py#L46-L62\r\n\r\nThe problem call is to `os.path.samefile` on line 58, where it compares the output of `shutil.which('pip')` to `<sys.prefix>/bin/pip` (in my case `/usr/bin/pip`). However, on my system, `pip` is installed to the user site-packages directory (so the binary is at `/home/domdf/.local/bin/pip`).\r\n\r\nThe solution is to check whether the file exists before calling `samefile`.\r\n\r\nI have Python 3.7 and 3.9 installed to `/usr` alongside the system's Python 3.8, and the error is present with all three versions.\n\n### Expected behavior\n\nPip checks for a new version without an error.\n\n### pip version\n\n22.1.2\n\n### Python version\n\n3.9.13\n\n### OS\n\nUbuntu 20.04\n\n### How to Reproduce\n\n1. `pip install pip==22.1.2`\r\n2. `pip install pip` <- Any package will do.\r\n\n\n### Output\n\n```shell\n$ pip install pip\r\nDefaulting to user installation because normal site-packages is not writeable\r\nRequirement already satisfied: pip in ./.local/lib/python3.9/site-packages (22.1.2)\r\n--- Logging error ---\r\nTraceback (most recent call last):\r\n File \"/home/domdf/.local/lib/python3.9/site-packages/pip/_internal/utils/logging.py\", line 177, in emit\r\n self.console.print(renderable, overflow=\"ignore\", crop=False, style=style)\r\n File \"/home/domdf/.local/lib/python3.9/site-packages/pip/_vendor/rich/console.py\", line 1752, in print\r\n extend(render(renderable, render_options))\r\n File \"/home/domdf/.local/lib/python3.9/site-packages/pip/_vendor/rich/console.py\", line 1390, in render\r\n for render_output in iter_render:\r\n File \"/home/domdf/.local/lib/python3.9/site-packages/pip/_internal/utils/logging.py\", line 134, in __rich_console__\r\n for line in lines:\r\n File \"/home/domdf/.local/lib/python3.9/site-packages/pip/_vendor/rich/segment.py\", line 245, in split_lines\r\n for segment in segments:\r\n File \"/home/domdf/.local/lib/python3.9/site-packages/pip/_vendor/rich/console.py\", line 1368, in render\r\n renderable = rich_cast(renderable)\r\n File \"/home/domdf/.local/lib/python3.9/site-packages/pip/_vendor/rich/protocol.py\", line 36, in rich_cast\r\n renderable = cast_method()\r\n File \"/home/domdf/.local/lib/python3.9/site-packages/pip/_internal/self_outdated_check.py\", line 130, in __rich__\r\n pip_cmd = get_best_invocation_for_this_pip()\r\n File \"/home/domdf/.local/lib/python3.9/site-packages/pip/_internal/utils/entrypoints.py\", line 58, in get_best_invocation_for_this_pip\r\n if found_executable and os.path.samefile(\r\n File \"/usr/lib/python3.9/genericpath.py\", line 101, in 
samefile\r\n s2 = os.stat(f2)\r\nFileNotFoundError: [Errno 2] No such file or directory: '/usr/bin/pip'\r\nCall stack:\r\n File \"/usr/lib/python3.9/runpy.py\", line 197, in _run_module_as_main\r\n return _run_code(code, main_globals, None,\r\n File \"/usr/lib/python3.9/runpy.py\", line 87, in _run_code\r\n exec(code, run_globals)\r\n File \"/home/domdf/.local/lib/python3.9/site-packages/pip/__main__.py\", line 31, in <module>\r\n sys.exit(_main())\r\n File \"/home/domdf/.local/lib/python3.9/site-packages/pip/_internal/cli/main.py\", line 70, in main\r\n return command.main(cmd_args)\r\n File \"/home/domdf/.local/lib/python3.9/site-packages/pip/_internal/cli/base_command.py\", line 101, in main\r\n return self._main(args)\r\n File \"/home/domdf/.local/lib/python3.9/site-packages/pip/_internal/cli/base_command.py\", line 223, in _main\r\n self.handle_pip_version_check(options)\r\n File \"/home/domdf/.local/lib/python3.9/site-packages/pip/_internal/cli/req_command.py\", line 148, in handle_pip_version_check\r\n pip_self_version_check(session, options)\r\n File \"/home/domdf/.local/lib/python3.9/site-packages/pip/_internal/self_outdated_check.py\", line 237, in pip_self_version_check\r\n logger.info(\"[present-rich] %s\", upgrade_prompt)\r\n File \"/usr/lib/python3.9/logging/__init__.py\", line 1446, in info\r\n self._log(INFO, msg, args, **kwargs)\r\n File \"/usr/lib/python3.9/logging/__init__.py\", line 1589, in _log\r\n self.handle(record)\r\n File \"/usr/lib/python3.9/logging/__init__.py\", line 1599, in handle\r\n self.callHandlers(record)\r\n File \"/usr/lib/python3.9/logging/__init__.py\", line 1661, in callHandlers\r\n hdlr.handle(record)\r\n File \"/usr/lib/python3.9/logging/__init__.py\", line 952, in handle\r\n self.emit(record)\r\n File \"/home/domdf/.local/lib/python3.9/site-packages/pip/_internal/utils/logging.py\", line 179, in emit\r\n self.handleError(record)\r\nMessage: '[present-rich] %s'\r\nArguments: (UpgradePrompt(old='22.1.2', new='22.2'),)\n```\n\n\n### Code of Conduct\n\n- [X] I agree to follow the [PSF Code of Conduct](https://www.python.org/psf/conduct/).\n", "before_files": [{"content": "import itertools\nimport os\nimport shutil\nimport sys\nfrom typing import List, Optional\n\nfrom pip._internal.cli.main import main\nfrom pip._internal.utils.compat import WINDOWS\n\n_EXECUTABLE_NAMES = [\n \"pip\",\n f\"pip{sys.version_info.major}\",\n f\"pip{sys.version_info.major}.{sys.version_info.minor}\",\n]\nif WINDOWS:\n _allowed_extensions = {\"\", \".exe\"}\n _EXECUTABLE_NAMES = [\n \"\".join(parts)\n for parts in itertools.product(_EXECUTABLE_NAMES, _allowed_extensions)\n ]\n\n\ndef _wrapper(args: Optional[List[str]] = None) -> int:\n \"\"\"Central wrapper for all old entrypoints.\n\n Historically pip has had several entrypoints defined. Because of issues\n arising from PATH, sys.path, multiple Pythons, their interactions, and most\n of them having a pip installed, users suffer every time an entrypoint gets\n moved.\n\n To alleviate this pain, and provide a mechanism for warning users and\n directing them to an appropriate place for help, we now define all of\n our old entrypoints as wrappers for the current one.\n \"\"\"\n sys.stderr.write(\n \"WARNING: pip is being invoked by an old script wrapper. 
This will \"\n \"fail in a future version of pip.\\n\"\n \"Please see https://github.com/pypa/pip/issues/5599 for advice on \"\n \"fixing the underlying issue.\\n\"\n \"To avoid this problem you can invoke Python with '-m pip' instead of \"\n \"running pip directly.\\n\"\n )\n return main(args)\n\n\ndef get_best_invocation_for_this_pip() -> str:\n \"\"\"Try to figure out the best way to invoke pip in the current environment.\"\"\"\n binary_directory = \"Scripts\" if WINDOWS else \"bin\"\n binary_prefix = os.path.join(sys.prefix, binary_directory)\n\n # Try to use pip[X[.Y]] names, if those executables for this environment are\n # the first on PATH with that name.\n path_parts = os.path.normcase(os.environ.get(\"PATH\", \"\")).split(os.pathsep)\n exe_are_in_PATH = os.path.normcase(binary_prefix) in path_parts\n if exe_are_in_PATH:\n for exe_name in _EXECUTABLE_NAMES:\n found_executable = shutil.which(exe_name)\n if found_executable and os.path.samefile(\n found_executable,\n os.path.join(binary_prefix, exe_name),\n ):\n return exe_name\n\n # Use the `-m` invocation, if there's no \"nice\" invocation.\n return f\"{get_best_invocation_for_this_python()} -m pip\"\n\n\ndef get_best_invocation_for_this_python() -> str:\n \"\"\"Try to figure out the best way to invoke the current Python.\"\"\"\n exe = sys.executable\n exe_name = os.path.basename(exe)\n\n # Try to use the basename, if it's the first executable.\n found_executable = shutil.which(exe_name)\n if found_executable and os.path.samefile(found_executable, exe):\n return exe_name\n\n # Use the full executable name, because we couldn't find something simpler.\n return exe\n", "path": "src/pip/_internal/utils/entrypoints.py"}]} | 2,820 | 188 |
gh_patches_debug_22275 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-39 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Refactor how version is defined
Currently we just have a legacy `version.py` file with the version inside it.
It duplicates the version information from `pyproject.toml`.
That's how it should be done instead: https://github.com/sdispater/poetry/issues/273#issuecomment-401983643
</issue>
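A minimal sketch of the single-source-of-truth approach linked above: ask the installed distribution's metadata for the version at runtime instead of duplicating the number in a module. It assumes the package has already been installed (e.g. by poetry or pip); otherwise the lookup raises `DistributionNotFound`.

```python
import pkg_resources  # provided by setuptools

# Resolve the version from the installed distribution's metadata,
# so pyproject.toml stays the only place the number is written.
version = pkg_resources.get_distribution('wemake-python-styleguide').version
print(version)
```

On current Python versions, `importlib.metadata.version("wemake-python-styleguide")` is the standard-library equivalent of this lookup.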
<code>
[start of wemake_python_styleguide/version.py]
1 # -*- coding: utf-8 -*-
2
3 __version__ = '0.0.2' # noqa
4 # TODO: resolve after https://github.com/sdispater/poetry/issues/273
5
[end of wemake_python_styleguide/version.py]
[start of wemake_python_styleguide/checker.py]
1 # -*- coding: utf-8 -*-
2
3 from ast import Module
4 from typing import Generator, Tuple
5
6 from wemake_python_styleguide.version import __version__
7 from wemake_python_styleguide.visitors.high_complexity import ComplexityVisitor
8 from wemake_python_styleguide.visitors.wrong_function_call import (
9 WrongFunctionCallVisitor,
10 )
11 from wemake_python_styleguide.visitors.wrong_import import WrongImportVisitor
12 from wemake_python_styleguide.visitors.wrong_keyword import (
13 WrongKeywordVisitor,
14 WrongRaiseVisitor,
15 )
16 from wemake_python_styleguide.visitors.wrong_name import (
17 WrongModuleMetadataVisitor,
18 WrongNameVisitor,
19 )
20 from wemake_python_styleguide.visitors.wrong_nested import WrongNestedVisitor
21
22 CheckResult = Tuple[int, int, str, type]
23
24
25 class Checker(object):
26 """
27 Main checker class.
28
29 Runs all possible checks.
30 """
31
32 name = 'wemake-python-styleguide'
33 version = __version__
34
35 def __init__(self, tree: Module, filename: str = '-') -> None:
36 """Creates new checker instance."""
37 self.tree = tree
38 self.filename = filename
39
40 self._visitors = (
41 WrongRaiseVisitor,
42 WrongFunctionCallVisitor,
43 WrongImportVisitor,
44 WrongKeywordVisitor,
45 WrongNestedVisitor,
46 ComplexityVisitor,
47 WrongNameVisitor,
48 WrongModuleMetadataVisitor,
49 )
50
51 def run(self) -> Generator[CheckResult, None, None]:
52 """
53 Runs the checker.
54
55 This method is used by `flake8` API.
56 """
57 for visitor_class in self._visitors:
58 visiter = visitor_class()
59 visiter.visit(self.tree)
60
61 for error in visiter.errors:
62 lineno, col_offset, message = error.node_items()
63 yield lineno, col_offset, message, type(self)
64
[end of wemake_python_styleguide/checker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wemake_python_styleguide/checker.py b/wemake_python_styleguide/checker.py
--- a/wemake_python_styleguide/checker.py
+++ b/wemake_python_styleguide/checker.py
@@ -3,7 +3,7 @@
from ast import Module
from typing import Generator, Tuple
-from wemake_python_styleguide.version import __version__
+from wemake_python_styleguide.version import version
from wemake_python_styleguide.visitors.high_complexity import ComplexityVisitor
from wemake_python_styleguide.visitors.wrong_function_call import (
WrongFunctionCallVisitor,
@@ -30,7 +30,7 @@
"""
name = 'wemake-python-styleguide'
- version = __version__
+ version = version
def __init__(self, tree: Module, filename: str = '-') -> None:
"""Creates new checker instance."""
diff --git a/wemake_python_styleguide/version.py b/wemake_python_styleguide/version.py
--- a/wemake_python_styleguide/version.py
+++ b/wemake_python_styleguide/version.py
@@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
-__version__ = '0.0.2' # noqa
-# TODO: resolve after https://github.com/sdispater/poetry/issues/273
+import pkg_resources
+
+version = pkg_resources.get_distribution('wemake-python-styleguide').version
| {"golden_diff": "diff --git a/wemake_python_styleguide/checker.py b/wemake_python_styleguide/checker.py\n--- a/wemake_python_styleguide/checker.py\n+++ b/wemake_python_styleguide/checker.py\n@@ -3,7 +3,7 @@\n from ast import Module\n from typing import Generator, Tuple\n \n-from wemake_python_styleguide.version import __version__\n+from wemake_python_styleguide.version import version\n from wemake_python_styleguide.visitors.high_complexity import ComplexityVisitor\n from wemake_python_styleguide.visitors.wrong_function_call import (\n WrongFunctionCallVisitor,\n@@ -30,7 +30,7 @@\n \"\"\"\n \n name = 'wemake-python-styleguide'\n- version = __version__\n+ version = version\n \n def __init__(self, tree: Module, filename: str = '-') -> None:\n \"\"\"Creates new checker instance.\"\"\"\ndiff --git a/wemake_python_styleguide/version.py b/wemake_python_styleguide/version.py\n--- a/wemake_python_styleguide/version.py\n+++ b/wemake_python_styleguide/version.py\n@@ -1,4 +1,5 @@\n # -*- coding: utf-8 -*-\n \n-__version__ = '0.0.2' # noqa\n-# TODO: resolve after https://github.com/sdispater/poetry/issues/273\n+import pkg_resources\n+\n+version = pkg_resources.get_distribution('wemake-python-styleguide').version\n", "issue": "Refactor how version is defined\nCurrently we just have a legacy `version.py` file with version inside it.\r\nIt duplicates the version information from `pyproject.toml`.\r\n\r\nThat's how it should be: https://github.com/sdispater/poetry/issues/273#issuecomment-401983643\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n__version__ = '0.0.2' # noqa\n# TODO: resolve after https://github.com/sdispater/poetry/issues/273\n", "path": "wemake_python_styleguide/version.py"}, {"content": "# -*- coding: utf-8 -*-\n\nfrom ast import Module\nfrom typing import Generator, Tuple\n\nfrom wemake_python_styleguide.version import __version__\nfrom wemake_python_styleguide.visitors.high_complexity import ComplexityVisitor\nfrom wemake_python_styleguide.visitors.wrong_function_call import (\n WrongFunctionCallVisitor,\n)\nfrom wemake_python_styleguide.visitors.wrong_import import WrongImportVisitor\nfrom wemake_python_styleguide.visitors.wrong_keyword import (\n WrongKeywordVisitor,\n WrongRaiseVisitor,\n)\nfrom wemake_python_styleguide.visitors.wrong_name import (\n WrongModuleMetadataVisitor,\n WrongNameVisitor,\n)\nfrom wemake_python_styleguide.visitors.wrong_nested import WrongNestedVisitor\n\nCheckResult = Tuple[int, int, str, type]\n\n\nclass Checker(object):\n \"\"\"\n Main checker class.\n\n Runs all possible checks.\n \"\"\"\n\n name = 'wemake-python-styleguide'\n version = __version__\n\n def __init__(self, tree: Module, filename: str = '-') -> None:\n \"\"\"Creates new checker instance.\"\"\"\n self.tree = tree\n self.filename = filename\n\n self._visitors = (\n WrongRaiseVisitor,\n WrongFunctionCallVisitor,\n WrongImportVisitor,\n WrongKeywordVisitor,\n WrongNestedVisitor,\n ComplexityVisitor,\n WrongNameVisitor,\n WrongModuleMetadataVisitor,\n )\n\n def run(self) -> Generator[CheckResult, None, None]:\n \"\"\"\n Runs the checker.\n\n This method is used by `flake8` API.\n \"\"\"\n for visitor_class in self._visitors:\n visiter = visitor_class()\n visiter.visit(self.tree)\n\n for error in visiter.errors:\n lineno, col_offset, message = error.node_items()\n yield lineno, col_offset, message, type(self)\n", "path": "wemake_python_styleguide/checker.py"}]} | 1,202 | 320 |
gh_patches_debug_4756 | rasdani/github-patches | git_diff | keras-team__keras-core-579 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Using torch backend
Using PyTorch backend.
Epoch 1/3
32/32 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - mean_absolute_error: 0.4083 - loss: 0.2566
Epoch 2/3
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - mean_absolute_error: 0.3805 - loss: 0.2151
Epoch 3/3
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - mean_absolute_error: 0.3704 - loss: 0.2056
Epoch 1/5
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.2699 - mae: 0.4200
Epoch 2/5
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.2409 - mae: 0.3940
Epoch 3/5
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.2271 - mae: 0.3856
Epoch 4/5
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.2174 - mae: 0.3785
Epoch 5/5
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.2120 - mae: 0.3699
Epoch 1/3
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - mean_absolute_error: 0.7020 - loss: 0.3334
Epoch 2/3
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - mean_absolute_error: 0.4075 - loss: 0.1271
Epoch 3/3
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - mean_absolute_error: 0.3776 - loss: 0.1010
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - mean_absolute_error: 0.8608 - loss: 0.9672
Traceback (most recent call last):
File "E:\custom_train_step_in_torch.py", line 483, in <module>
gan.fit(dataloader, epochs=1)
File "C:\Python_310\lib\site-packages\keras_core\src\utils\traceback_utils.py", line 123, in error_handler
raise e.with_traceback(filtered_tb) from None
File "C:\Python_310\lib\site-packages\keras_core\src\utils\module_utils.py", line 26, in initialize
raise ImportError(
ImportError: This requires the tensorflow module. You can install it via `pip install tensorflow`
</issue>
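The traceback boils down to a lazy-import guard that reports a module as available even when importing it failed. A stripped-down, self-contained sketch of the corrected caching logic is below; the class here is illustrative, not the keras_core implementation, which follows in the code listing.

```python
import importlib


class LazyModule:
    def __init__(self, name):
        self.name = name
        self._available = None  # tri-state cache: None = not checked yet

    @property
    def available(self):
        if self._available is None:
            try:
                importlib.import_module(self.name)
                self._available = True   # only reached if the import worked
            except ImportError:
                self._available = False
        return self._available


print(LazyModule("json").available)                # True
print(LazyModule("definitely_missing").available)  # False
```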
<code>
[start of keras_core/utils/module_utils.py]
1 import importlib
2
3
4 class LazyModule:
5 def __init__(self, name, pip_name=None):
6 self.name = name
7 pip_name = pip_name or name
8 self.pip_name = pip_name
9 self.module = None
10 self._available = None
11
12 @property
13 def available(self):
14 if self._available is None:
15 try:
16 self.initialize()
17 except ImportError:
18 self._available = False
19 self._available = True
20 return self._available
21
22 def initialize(self):
23 try:
24 self.module = importlib.import_module(self.name)
25 except ImportError:
26 raise ImportError(
27 f"This requires the {self.name} module. "
28 f"You can install it via `pip install {self.pip_name}`"
29 )
30
31 def __getattr__(self, name):
32 if self.module is None:
33 self.initialize()
34 return getattr(self.module, name)
35
36
37 tensorflow = LazyModule("tensorflow")
38 gfile = LazyModule("tensorflow.io.gfile")
39
[end of keras_core/utils/module_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/keras_core/utils/module_utils.py b/keras_core/utils/module_utils.py
--- a/keras_core/utils/module_utils.py
+++ b/keras_core/utils/module_utils.py
@@ -14,9 +14,9 @@
if self._available is None:
try:
self.initialize()
+ self._available = True
except ImportError:
self._available = False
- self._available = True
return self._available
def initialize(self):
| {"golden_diff": "diff --git a/keras_core/utils/module_utils.py b/keras_core/utils/module_utils.py\n--- a/keras_core/utils/module_utils.py\n+++ b/keras_core/utils/module_utils.py\n@@ -14,9 +14,9 @@\n if self._available is None:\n try:\n self.initialize()\n+ self._available = True\n except ImportError:\n self._available = False\n- self._available = True\n return self._available\n \n def initialize(self):\n", "issue": "Using torch backend\nUsing PyTorch backend.\r\nEpoch 1/3\r\n32/32 \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 1s 4ms/step - mean_absolute_error: 0.4083 - loss: 0.2566\r\nEpoch 2/3\r\n32/32 \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 0s 4ms/step - mean_absolute_error: 0.3805 - loss: 0.2151\r\nEpoch 3/3\r\n32/32 \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 0s 4ms/step - mean_absolute_error: 0.3704 - loss: 0.2056\r\nEpoch 1/5\r\n32/32 \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 0s 4ms/step - loss: 0.2699 - mae: 0.4200\r\nEpoch 2/5\r\n32/32 \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 0s 4ms/step - loss: 0.2409 - mae: 0.3940\r\nEpoch 3/5\r\n32/32 \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 0s 4ms/step - loss: 0.2271 - mae: 0.3856\r\nEpoch 4/5\r\n32/32 \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 0s 4ms/step - loss: 0.2174 - mae: 0.3785\r\nEpoch 5/5\r\n32/32 \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 0s 4ms/step - loss: 0.2120 - mae: 0.3699\r\nEpoch 1/3\r\n32/32 \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 0s 4ms/step - mean_absolute_error: 0.7020 - loss: 0.3334\r\nEpoch 2/3\r\n32/32 \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 0s 4ms/step - mean_absolute_error: 0.4075 - loss: 0.1271\r\nEpoch 3/3\r\n32/32 \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 0s 4ms/step - mean_absolute_error: 0.3776 - loss: 0.1010\r\n32/32 \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 0s 2ms/step - mean_absolute_error: 0.8608 - loss: 0.9672\r\nTraceback (most recent call last):\r\n File \"E:\\custom_train_step_in_torch.py\", line 483, in <module>\r\n gan.fit(dataloader, epochs=1)\r\n File \"C:\\Python_310\\lib\\site-packages\\keras_core\\src\\utils\\traceback_utils.py\", line 123, in error_handler\r\n raise e.with_traceback(filtered_tb) from None\r\n File \"C:\\Python_310\\lib\\site-packages\\keras_core\\src\\utils\\module_utils.py\", line 26, in initialize\r\n raise ImportError(\r\nImportError: This requires the tensorflow module. 
You can install it via `pip install tensorflow`\n", "before_files": [{"content": "import importlib\n\n\nclass LazyModule:\n def __init__(self, name, pip_name=None):\n self.name = name\n pip_name = pip_name or name\n self.pip_name = pip_name\n self.module = None\n self._available = None\n\n @property\n def available(self):\n if self._available is None:\n try:\n self.initialize()\n except ImportError:\n self._available = False\n self._available = True\n return self._available\n\n def initialize(self):\n try:\n self.module = importlib.import_module(self.name)\n except ImportError:\n raise ImportError(\n f\"This requires the {self.name} module. \"\n f\"You can install it via `pip install {self.pip_name}`\"\n )\n\n def __getattr__(self, name):\n if self.module is None:\n self.initialize()\n return getattr(self.module, name)\n\n\ntensorflow = LazyModule(\"tensorflow\")\ngfile = LazyModule(\"tensorflow.io.gfile\")\n", "path": "keras_core/utils/module_utils.py"}]} | 1,621 | 109 |
gh_patches_debug_25289 | rasdani/github-patches | git_diff | easybuilders__easybuild-framework-4551 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Environment variable change in module cray-libsci of CPE 23.12
Hi, I'm reporting a bug affecting EasyBuild on Cray systems (file [libsci.py](https://github.com/easybuilders/easybuild-framework/blob/develop/easybuild/toolchains/linalg/libsci.py)) with the Cray Programming Environment (CPE) 23.12. The bug should be fixed in CPE 24.03 according to HPE/Cray staff, so the impact is limited:
- The environment variable name referenced in [line 68](https://github.com/easybuilders/easybuild-framework/blob/e4524c1c70e496e5886de7d4848bb8147eea84bd/easybuild/toolchains/linalg/libsci.py#L68) changed from `CRAY_LIBSCI_PREFIX_DIR` to `CRAY_PE_LIBSCI_PREFIX_DIR`
- I have manually fixed [line 69](https://github.com/easybuilders/easybuild-framework/blob/e4524c1c70e496e5886de7d4848bb8147eea84bd/easybuild/toolchains/linalg/libsci.py#L69) using the workaround below:
`root = os.getenv('CRAY_LIBSCI_PREFIX_DIR', None) or os.getenv('CRAY_PE_LIBSCI_PREFIX_DIR', None)`
The environment variable name should be fixed back to the original one in CPE 24.03 (I did not have the chance to test it yet, though). Since CPE variable names change sometimes, it might be useful to give the option to read the `prefix` of the external module `cray-libsci` from a [metadata file](https://docs.easybuild.io/using-external-modules/?h=metadata#using_external_modules_metadata) instead of having it hard coded.
</issue>
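A minimal sketch of the fallback lookup described in the workaround above, trying the long-standing variable name first and the CPE 23.12 spelling second (both names are taken from the issue; outside a Cray PE both lookups simply return None):

```python
import os

# Prefer the pre-23.12 name, fall back to the CPE 23.12 name.
root = (
    os.getenv('CRAY_LIBSCI_PREFIX_DIR')
    or os.getenv('CRAY_PE_LIBSCI_PREFIX_DIR')
)
print(root)
```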
<code>
[start of easybuild/toolchains/linalg/libsci.py]
1 ##
2 # Copyright 2014-2024 Ghent University
3 #
4 # This file is part of EasyBuild,
5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
6 # with support of Ghent University (http://ugent.be/hpc),
7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en)
9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
10 #
11 # https://github.com/easybuilders/easybuild
12 #
13 # EasyBuild is free software: you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation v2.
16 #
17 # EasyBuild is distributed in the hope that it will be useful,
18 # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 # GNU General Public License for more details.
21 #
22 # You should have received a copy of the GNU General Public License
23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
24 ##
25 """
26 Support for Cray's LibSci library, which provides BLAS/LAPACK support.
27 cfr. https://www.nersc.gov/users/software/programming-libraries/math-libraries/libsci/
28
29 Authors:
30
31 * Petar Forai (IMP/IMBA, Austria)
32 * Kenneth Hoste (Ghent University)
33 """
34 import os
35
36 from easybuild.tools.build_log import EasyBuildError
37 from easybuild.tools.toolchain.linalg import LinAlg
38
39
40 CRAY_LIBSCI_MODULE_NAME = 'cray-libsci'
41 TC_CONSTANT_CRAY_LIBSCI = 'CrayLibSci'
42
43
44 class LibSci(LinAlg):
45 """Support for Cray's LibSci library, which provides BLAS/LAPACK support."""
46 # BLAS/LAPACK support
47 # via cray-libsci module, which gets loaded via the PrgEnv module
48 # see https://www.nersc.gov/users/software/programming-libraries/math-libraries/libsci/
49 BLAS_MODULE_NAME = [CRAY_LIBSCI_MODULE_NAME]
50
51 # no need to specify libraries, compiler driver takes care of linking the right libraries
52 # FIXME: need to revisit this, on numpy we ended up with a serial BLAS through the wrapper.
53 BLAS_LIB = ['']
54 BLAS_LIB_MT = ['']
55 BLAS_FAMILY = TC_CONSTANT_CRAY_LIBSCI
56
57 LAPACK_MODULE_NAME = [CRAY_LIBSCI_MODULE_NAME]
58 LAPACK_IS_BLAS = True
59 LAPACK_FAMILY = TC_CONSTANT_CRAY_LIBSCI
60
61 BLACS_MODULE_NAME = []
62 SCALAPACK_MODULE_NAME = []
63
64 def _get_software_root(self, name, required=True):
65 """Get install prefix for specified software name; special treatment for Cray modules."""
66 if name == 'cray-libsci':
67 # Cray-provided LibSci module
68 env_var = 'CRAY_LIBSCI_PREFIX_DIR'
69 root = os.getenv(env_var, None)
70 if root is None:
71 if required:
72 raise EasyBuildError("Failed to determine install prefix for %s via $%s", name, env_var)
73 else:
74 self.log.debug("Obtained install prefix for %s via $%s: %s", name, env_var, root)
75 else:
76 root = super(LibSci, self)._get_software_root(name, required=required)
77
78 return root
79
80 def _set_blacs_variables(self):
81 """Skip setting BLACS related variables"""
82 pass
83
84 def _set_scalapack_variables(self):
85 """Skip setting ScaLAPACK related variables"""
86 pass
87
88 def definition(self):
89 """
90 Filter BLAS module from toolchain definition.
91 The cray-libsci module is loaded indirectly (and versionless) via the PrgEnv module,
92 and thus is not a direct toolchain component.
93 """
94 tc_def = super(LibSci, self).definition()
95 tc_def['BLAS'] = []
96 tc_def['LAPACK'] = []
97 return tc_def
98
[end of easybuild/toolchains/linalg/libsci.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/easybuild/toolchains/linalg/libsci.py b/easybuild/toolchains/linalg/libsci.py
--- a/easybuild/toolchains/linalg/libsci.py
+++ b/easybuild/toolchains/linalg/libsci.py
@@ -65,13 +65,20 @@
"""Get install prefix for specified software name; special treatment for Cray modules."""
if name == 'cray-libsci':
# Cray-provided LibSci module
- env_var = 'CRAY_LIBSCI_PREFIX_DIR'
- root = os.getenv(env_var, None)
+ root = None
+ # consider both $CRAY_LIBSCI_PREFIX_DIR and $CRAY_PE_LIBSCI_PREFIX_DIR,
+ # cfr. https://github.com/easybuilders/easybuild-framework/issues/4536
+ env_vars = ('CRAY_LIBSCI_PREFIX_DIR', 'CRAY_PE_LIBSCI_PREFIX_DIR')
+ for env_var in env_vars:
+ root = os.getenv(env_var, None)
+ if root is not None:
+ self.log.debug("Obtained install prefix for %s via $%s: %s", name, env_var, root)
+ break
+
if root is None:
if required:
- raise EasyBuildError("Failed to determine install prefix for %s via $%s", name, env_var)
- else:
- self.log.debug("Obtained install prefix for %s via $%s: %s", name, env_var, root)
+ env_vars_str = ', '.join('$' + e for e in env_vars)
+ raise EasyBuildError("Failed to determine install prefix for %s via $%s", name, env_vars_str)
else:
root = super(LibSci, self)._get_software_root(name, required=required)
| {"golden_diff": "diff --git a/easybuild/toolchains/linalg/libsci.py b/easybuild/toolchains/linalg/libsci.py\n--- a/easybuild/toolchains/linalg/libsci.py\n+++ b/easybuild/toolchains/linalg/libsci.py\n@@ -65,13 +65,20 @@\n \"\"\"Get install prefix for specified software name; special treatment for Cray modules.\"\"\"\n if name == 'cray-libsci':\n # Cray-provided LibSci module\n- env_var = 'CRAY_LIBSCI_PREFIX_DIR'\n- root = os.getenv(env_var, None)\n+ root = None\n+ # consider both $CRAY_LIBSCI_PREFIX_DIR and $CRAY_PE_LIBSCI_PREFIX_DIR,\n+ # cfr. https://github.com/easybuilders/easybuild-framework/issues/4536\n+ env_vars = ('CRAY_LIBSCI_PREFIX_DIR', 'CRAY_PE_LIBSCI_PREFIX_DIR')\n+ for env_var in env_vars:\n+ root = os.getenv(env_var, None)\n+ if root is not None:\n+ self.log.debug(\"Obtained install prefix for %s via $%s: %s\", name, env_var, root)\n+ break\n+\n if root is None:\n if required:\n- raise EasyBuildError(\"Failed to determine install prefix for %s via $%s\", name, env_var)\n- else:\n- self.log.debug(\"Obtained install prefix for %s via $%s: %s\", name, env_var, root)\n+ env_vars_str = ', '.join('$' + e for e in env_vars)\n+ raise EasyBuildError(\"Failed to determine install prefix for %s via $%s\", name, env_vars_str)\n else:\n root = super(LibSci, self)._get_software_root(name, required=required)\n", "issue": "Environment variable change in module cray-libsci of CPE 23.12\nHi, I report a bug affecting EasyBuild on Cray systems (file [libsci.py](https://github.com/easybuilders/easybuild-framework/blob/develop/easybuild/toolchains/linalg/libsci.py)) with the Cray Programming Environment (CPE) 23.12. The bug should be fixed in CPE 24.03 according to HPE/Cray staff, therefore the impact is limited:\r\n- The environment variable name referenced in [line 68](https://github.com/easybuilders/easybuild-framework/blob/e4524c1c70e496e5886de7d4848bb8147eea84bd/easybuild/toolchains/linalg/libsci.py#L68) changed from `CRAY_LIBSCI_PREFIX_DIR` to `CRAY_PE_LIBSCI_PREFIX_DIR`\r\n- I have manually fixed [line 69](https://github.com/easybuilders/easybuild-framework/blob/e4524c1c70e496e5886de7d4848bb8147eea84bd/easybuild/toolchains/linalg/libsci.py#L69) using the workaround below:\r\n `root = os.getenv('CRAY_LIBSCI_PREFIX_DIR', None) or os.getenv('CRAY_PE_LIBSCI_PREFIX_DIR', None)`\r\n\r\nThe environment variable name should be fixed back to the original one in CPE 24.03 (I did not have the chance to test it yet, though). 
Since CPE variable names change sometimes, it might be useful to give the option to read the `prefix` of the external module `cray-libsci` from a [metadata file](https://docs.easybuild.io/using-external-modules/?h=metadata#using_external_modules_metadata) instead of having it hard coded.\r\n\n", "before_files": [{"content": "##\n# Copyright 2014-2024 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nSupport for Cray's LibSci library, which provides BLAS/LAPACK support.\ncfr. https://www.nersc.gov/users/software/programming-libraries/math-libraries/libsci/\n\nAuthors:\n\n* Petar Forai (IMP/IMBA, Austria)\n* Kenneth Hoste (Ghent University)\n\"\"\"\nimport os\n\nfrom easybuild.tools.build_log import EasyBuildError\nfrom easybuild.tools.toolchain.linalg import LinAlg\n\n\nCRAY_LIBSCI_MODULE_NAME = 'cray-libsci'\nTC_CONSTANT_CRAY_LIBSCI = 'CrayLibSci'\n\n\nclass LibSci(LinAlg):\n \"\"\"Support for Cray's LibSci library, which provides BLAS/LAPACK support.\"\"\"\n # BLAS/LAPACK support\n # via cray-libsci module, which gets loaded via the PrgEnv module\n # see https://www.nersc.gov/users/software/programming-libraries/math-libraries/libsci/\n BLAS_MODULE_NAME = [CRAY_LIBSCI_MODULE_NAME]\n\n # no need to specify libraries, compiler driver takes care of linking the right libraries\n # FIXME: need to revisit this, on numpy we ended up with a serial BLAS through the wrapper.\n BLAS_LIB = ['']\n BLAS_LIB_MT = ['']\n BLAS_FAMILY = TC_CONSTANT_CRAY_LIBSCI\n\n LAPACK_MODULE_NAME = [CRAY_LIBSCI_MODULE_NAME]\n LAPACK_IS_BLAS = True\n LAPACK_FAMILY = TC_CONSTANT_CRAY_LIBSCI\n\n BLACS_MODULE_NAME = []\n SCALAPACK_MODULE_NAME = []\n\n def _get_software_root(self, name, required=True):\n \"\"\"Get install prefix for specified software name; special treatment for Cray modules.\"\"\"\n if name == 'cray-libsci':\n # Cray-provided LibSci module\n env_var = 'CRAY_LIBSCI_PREFIX_DIR'\n root = os.getenv(env_var, None)\n if root is None:\n if required:\n raise EasyBuildError(\"Failed to determine install prefix for %s via $%s\", name, env_var)\n else:\n self.log.debug(\"Obtained install prefix for %s via $%s: %s\", name, env_var, root)\n else:\n root = super(LibSci, self)._get_software_root(name, required=required)\n\n return root\n\n def _set_blacs_variables(self):\n \"\"\"Skip setting BLACS related variables\"\"\"\n pass\n\n def _set_scalapack_variables(self):\n \"\"\"Skip setting ScaLAPACK related variables\"\"\"\n pass\n\n def definition(self):\n \"\"\"\n Filter BLAS module from toolchain definition.\n The cray-libsci module is 
loaded indirectly (and versionless) via the PrgEnv module,\n and thus is not a direct toolchain component.\n \"\"\"\n tc_def = super(LibSci, self).definition()\n tc_def['BLAS'] = []\n tc_def['LAPACK'] = []\n return tc_def\n", "path": "easybuild/toolchains/linalg/libsci.py"}]} | 2,054 | 399 |
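A standalone sketch of the lookup order that patch introduces, pulled out of the toolchain class for brevity. The environment variable names come from the issue and the diff; the function name and the error type are illustrative only:

```
import os


def cray_libsci_prefix(required=True):
    """Return the cray-libsci install prefix from whichever CPE variable is set."""
    env_vars = ('CRAY_LIBSCI_PREFIX_DIR', 'CRAY_PE_LIBSCI_PREFIX_DIR')
    for env_var in env_vars:
        root = os.getenv(env_var)
        if root is not None:
            return root
    if required:
        raise RuntimeError("Failed to determine install prefix for cray-libsci via %s"
                           % ', '.join('$' + v for v in env_vars))
    return None
```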
gh_patches_debug_1251 | rasdani/github-patches | git_diff | chainer__chainer-987 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix the shape of return value of F.det
Currently, the return value of `det` is an `xp.array` whose shape is `(1, )`, not a scalar.
```
In [16]: a = chainer.Variable(numpy.random.uniform(-1, 1, (3, 3)).astype(numpy.float32))
In [17]: chainer.functions.det(a).data
Out[17]: array([-0.80874199], dtype=float32)
```
But the documentation says the return value should be a `chainer.Variable` whose data has the shape `()`.
</issue>
<code>
[start of chainer/functions/math/det.py]
1 import numpy
2
3 from chainer import cuda
4 from chainer import function
5 from chainer.functions.array import reshape
6 from chainer.functions.math import inv
7 from chainer.functions.math import matmul
8 from chainer import utils
9 from chainer.utils import type_check
10
11
12 def _det_gpu(b):
13 # We do a batched LU decomposition on the GPU to compute
14 # and compute the determinant by multiplying the diagonal.
15 # Change the shape of the array to be size=1 minibatch if necessary.
16 # Also copy the matrix as the elments will be modified in-place.
17 a = matmul._as_batch_mat(b).copy()
18 n = a.shape[1]
19 n_matrices = len(a)
20 # Pivot array
21 p = cuda.cupy.zeros((n_matrices, n), dtype='int32')
22 # Output array
23 # These arrays hold information on the execution success
24 # or if the matrix was singular.
25 info1 = cuda.cupy.zeros(n_matrices, dtype=numpy.intp)
26 ap = matmul._mat_ptrs(a)
27 _, lda = matmul._get_ld(a)
28 cuda.cublas.sgetrfBatched(cuda.Device().cublas_handle, n, ap.data.ptr, lda,
29 p.data.ptr, info1.data.ptr, n_matrices)
30 det = cuda.cupy.prod(a.diagonal(axis1=1, axis2=2), axis=1)
31 # The determinant is equal to the product of the diagonal entries
32 # of `a` where the sign of `a` is flipped depending on whether
33 # the pivot array is equal to its index.
34 rng = cuda.cupy.arange(1, n + 1, dtype='int32')
35 parity = cuda.cupy.sum(p != rng, axis=1) % 2
36 sign = 1. - 2. * parity.astype('float32')
37 success = cuda.cupy.all(info1 == 0)
38 return det * sign, success
39
40
41 class BatchDet(function.Function):
42
43 @property
44 def label(self):
45 return 'det'
46
47 def check_type_forward(self, in_types):
48 type_check.expect(in_types.size() == 1)
49 a_type, = in_types
50 a_type = matmul._convert_type(a_type)
51 type_check.expect(a_type.dtype.kind == 'f')
52 # Only a minibatch of 2D array shapes allowed.
53 type_check.expect(a_type.ndim == 3)
54 # Matrix inversion only allowed for square matrices
55 # so assert the last two dimensions are equal.
56 type_check.expect(a_type.shape[-1] == a_type.shape[-2])
57
58 def forward_cpu(self, x):
59 self.detx = utils.force_array(numpy.linalg.det(x[0]))
60 return self.detx,
61
62 def forward_gpu(self, x):
63 self.detx, success = _det_gpu(x[0])
64 if not success:
65 raise ValueError('Singular Matrix')
66 return self.detx,
67
68 def backward_cpu(self, x, gy):
69 x, = x
70 gy, = gy
71 grad = (gy[:, None, None] * self.detx[:, None, None] *
72 numpy.linalg.inv(x.transpose((0, 2, 1))))
73 return utils.force_array(grad),
74
75 def backward_gpu(self, x, gy):
76 x, = x
77 gy, = gy
78 grad = (gy[:, None, None] * self.detx[:, None, None] *
79 inv._inv_gpu(x.transpose((0, 2, 1))))
80 return utils.force_array(grad),
81
82
83 def batch_det(a):
84 """Computes the determinant of a batch of square matrices.
85
86 Args:
87 a (Variable): Input array to compute the determinant for.
88 The first dimension should iterate over each matrix and be
89 of the batchsize.
90
91 Returns:
92 ~chainer.Variable: vector of determinants for every matrix
93 in the batch.
94
95 """
96 return BatchDet()(a)
97
98
99 def det(a):
100 """Computes the determinant of a single square matrix.
101
102 Args:
103 a (Variable): Input array to compute the determinant for.
104
105 Returns:
106 ~chainer.Variable: Scalar determinant of the matrix a.
107
108 """
109 shape = (1, len(a.data), a.data.shape[1])
110 batched_a = reshape.Reshape(shape)(a)
111 batched_det = BatchDet()(batched_a)
112 return reshape.Reshape((1, ))(batched_det)
113
[end of chainer/functions/math/det.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer/functions/math/det.py b/chainer/functions/math/det.py
--- a/chainer/functions/math/det.py
+++ b/chainer/functions/math/det.py
@@ -109,4 +109,4 @@
shape = (1, len(a.data), a.data.shape[1])
batched_a = reshape.Reshape(shape)(a)
batched_det = BatchDet()(batched_a)
- return reshape.Reshape((1, ))(batched_det)
+ return reshape.Reshape(())(batched_det)
| {"golden_diff": "diff --git a/chainer/functions/math/det.py b/chainer/functions/math/det.py\n--- a/chainer/functions/math/det.py\n+++ b/chainer/functions/math/det.py\n@@ -109,4 +109,4 @@\n shape = (1, len(a.data), a.data.shape[1])\n batched_a = reshape.Reshape(shape)(a)\n batched_det = BatchDet()(batched_a)\n- return reshape.Reshape((1, ))(batched_det)\n+ return reshape.Reshape(())(batched_det)\n", "issue": "Fix the shape of return value of F.det\nCurrently, return value of `det` is `xp.array` whose shape is `(1, )`, not a scalar.\n\n```\nIn [16]: a = chainer.Variable(numpy.random.uniform(-1, 1, (3, 3)).astype(numpy.float32))\nIn [17]: chainer.functions.det(a).data\nOut[17]: array([-0.80874199], dtype=float32)\n```\n\nBut the document says the return value should be `chainer.Variable` whose data have the shape `()`.\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.functions.array import reshape\nfrom chainer.functions.math import inv\nfrom chainer.functions.math import matmul\nfrom chainer import utils\nfrom chainer.utils import type_check\n\n\ndef _det_gpu(b):\n # We do a batched LU decomposition on the GPU to compute\n # and compute the determinant by multiplying the diagonal.\n # Change the shape of the array to be size=1 minibatch if necessary.\n # Also copy the matrix as the elments will be modified in-place.\n a = matmul._as_batch_mat(b).copy()\n n = a.shape[1]\n n_matrices = len(a)\n # Pivot array\n p = cuda.cupy.zeros((n_matrices, n), dtype='int32')\n # Output array\n # These arrays hold information on the execution success\n # or if the matrix was singular.\n info1 = cuda.cupy.zeros(n_matrices, dtype=numpy.intp)\n ap = matmul._mat_ptrs(a)\n _, lda = matmul._get_ld(a)\n cuda.cublas.sgetrfBatched(cuda.Device().cublas_handle, n, ap.data.ptr, lda,\n p.data.ptr, info1.data.ptr, n_matrices)\n det = cuda.cupy.prod(a.diagonal(axis1=1, axis2=2), axis=1)\n # The determinant is equal to the product of the diagonal entries\n # of `a` where the sign of `a` is flipped depending on whether\n # the pivot array is equal to its index.\n rng = cuda.cupy.arange(1, n + 1, dtype='int32')\n parity = cuda.cupy.sum(p != rng, axis=1) % 2\n sign = 1. - 2. 
* parity.astype('float32')\n success = cuda.cupy.all(info1 == 0)\n return det * sign, success\n\n\nclass BatchDet(function.Function):\n\n @property\n def label(self):\n return 'det'\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n a_type, = in_types\n a_type = matmul._convert_type(a_type)\n type_check.expect(a_type.dtype.kind == 'f')\n # Only a minibatch of 2D array shapes allowed.\n type_check.expect(a_type.ndim == 3)\n # Matrix inversion only allowed for square matrices\n # so assert the last two dimensions are equal.\n type_check.expect(a_type.shape[-1] == a_type.shape[-2])\n\n def forward_cpu(self, x):\n self.detx = utils.force_array(numpy.linalg.det(x[0]))\n return self.detx,\n\n def forward_gpu(self, x):\n self.detx, success = _det_gpu(x[0])\n if not success:\n raise ValueError('Singular Matrix')\n return self.detx,\n\n def backward_cpu(self, x, gy):\n x, = x\n gy, = gy\n grad = (gy[:, None, None] * self.detx[:, None, None] *\n numpy.linalg.inv(x.transpose((0, 2, 1))))\n return utils.force_array(grad),\n\n def backward_gpu(self, x, gy):\n x, = x\n gy, = gy\n grad = (gy[:, None, None] * self.detx[:, None, None] *\n inv._inv_gpu(x.transpose((0, 2, 1))))\n return utils.force_array(grad),\n\n\ndef batch_det(a):\n \"\"\"Computes the determinant of a batch of square matrices.\n\n Args:\n a (Variable): Input array to compute the determinant for.\n The first dimension should iterate over each matrix and be\n of the batchsize.\n\n Returns:\n ~chainer.Variable: vector of determinants for every matrix\n in the batch.\n\n \"\"\"\n return BatchDet()(a)\n\n\ndef det(a):\n \"\"\"Computes the determinant of a single square matrix.\n\n Args:\n a (Variable): Input array to compute the determinant for.\n\n Returns:\n ~chainer.Variable: Scalar determinant of the matrix a.\n\n \"\"\"\n shape = (1, len(a.data), a.data.shape[1])\n batched_a = reshape.Reshape(shape)(a)\n batched_det = BatchDet()(batched_a)\n return reshape.Reshape((1, ))(batched_det)\n", "path": "chainer/functions/math/det.py"}]} | 1,863 | 122 |
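The fix for this entry is purely a shape change. A NumPy-only illustration of the difference between the `(1, )` array reported in the issue and the `()` scalar the documentation promises:

```
import numpy as np

batched_det = np.array([-0.80874199], dtype=np.float32)  # shape (1,), as in the issue report
scalar_det = batched_det.reshape(())                      # shape (), a zero-dimensional scalar

assert batched_det.shape == (1,)
assert scalar_det.shape == ()
assert float(scalar_det) == float(batched_det[0])
```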
gh_patches_debug_9189 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-1505 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Project inference is broken under Python 3.5.1
Project inference takes place in [_determine_default_project()](https://github.com/GoogleCloudPlatform/gcloud-python/blob/91be6938b26ba9198082f457ae37fba81b8f5ea0/gcloud/_helpers.py#L189), which hands off to [_compute_engine_id()](https://github.com/GoogleCloudPlatform/gcloud-python/blob/91be6938b26ba9198082f457ae37fba81b8f5ea0/gcloud/_helpers.py#L151). That returns the correct value -- but as `bytes`. The `Client` class checks if the project value is a `str` (using `six.string_types`) and raises an error because it is not (that code is [here](https://github.com/GoogleCloudPlatform/gcloud-python/blob/91be6938b26ba9198082f457ae37fba81b8f5ea0/gcloud/client.py#L144)).
</issue>
<code>
[start of gcloud/client.py]
1 # Copyright 2015 Google Inc. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Base classes for client used to interact with Google Cloud APIs."""
16
17 import six
18
19 from gcloud._helpers import _determine_default_project
20 from gcloud.connection import Connection
21 from gcloud.credentials import get_credentials
22 from gcloud.credentials import get_for_service_account_json
23 from gcloud.credentials import get_for_service_account_p12
24
25
26 class _ClientFactoryMixin(object):
27 """Mixin to allow factories that create credentials.
28
29 .. note::
30
31 This class is virtual.
32 """
33
34 @classmethod
35 def from_service_account_json(cls, json_credentials_path, *args, **kwargs):
36 """Factory to retrieve JSON credentials while creating client.
37
38 :type json_credentials_path: string
39 :param json_credentials_path: The path to a private key file (this file
40 was given to you when you created the
41 service account). This file must contain
42 a JSON object with a private key and
43 other credentials information (downloaded
44 from the Google APIs console).
45
46 :type args: tuple
47 :param args: Remaining positional arguments to pass to constructor.
48
49 :type kwargs: dict
50 :param kwargs: Remaining keyword arguments to pass to constructor.
51
52 :rtype: :class:`gcloud.pubsub.client.Client`
53 :returns: The client created with the retrieved JSON credentials.
54 :raises: :class:`TypeError` if there is a conflict with the kwargs
55 and the credentials created by the factory.
56 """
57 if 'credentials' in kwargs:
58 raise TypeError('credentials must not be in keyword arguments')
59 credentials = get_for_service_account_json(json_credentials_path)
60 kwargs['credentials'] = credentials
61 return cls(*args, **kwargs)
62
63 @classmethod
64 def from_service_account_p12(cls, client_email, private_key_path,
65 *args, **kwargs):
66 """Factory to retrieve P12 credentials while creating client.
67
68 .. note::
69 Unless you have an explicit reason to use a PKCS12 key for your
70 service account, we recommend using a JSON key.
71
72 :type client_email: string
73 :param client_email: The e-mail attached to the service account.
74
75 :type private_key_path: string
76 :param private_key_path: The path to a private key file (this file was
77 given to you when you created the service
78 account). This file must be in P12 format.
79
80 :type args: tuple
81 :param args: Remaining positional arguments to pass to constructor.
82
83 :type kwargs: dict
84 :param kwargs: Remaining keyword arguments to pass to constructor.
85
86 :rtype: :class:`gcloud.client.Client`
87 :returns: The client created with the retrieved P12 credentials.
88 :raises: :class:`TypeError` if there is a conflict with the kwargs
89 and the credentials created by the factory.
90 """
91 if 'credentials' in kwargs:
92 raise TypeError('credentials must not be in keyword arguments')
93 credentials = get_for_service_account_p12(client_email,
94 private_key_path)
95 kwargs['credentials'] = credentials
96 return cls(*args, **kwargs)
97
98
99 class Client(_ClientFactoryMixin):
100 """Client to bundle configuration needed for API requests.
101
102 Assumes that the associated ``_connection_class`` only accepts
103 ``http`` and ``credentials`` in its constructor.
104
105 :type credentials: :class:`oauth2client.client.OAuth2Credentials` or
106 :class:`NoneType`
107 :param credentials: The OAuth2 Credentials to use for the connection
108 owned by this client. If not passed (and if no ``http``
109 object is passed), falls back to the default inferred
110 from the environment.
111
112 :type http: :class:`httplib2.Http` or class that defines ``request()``.
113 :param http: An optional HTTP object to make requests. If not passed, an
114 ``http`` object is created that is bound to the
115 ``credentials`` for the current object.
116 """
117
118 _connection_class = Connection
119
120 def __init__(self, credentials=None, http=None):
121 if credentials is None and http is None:
122 credentials = get_credentials()
123 self.connection = self._connection_class(
124 credentials=credentials, http=http)
125
126
127 class _ClientProjectMixin(object):
128 """Mixin to allow setting the project on the client.
129
130 :type project: string
131 :param project: the project which the client acts on behalf of. If not
132 passed falls back to the default inferred from the
133 environment.
134
135 :raises: :class:`ValueError` if the project is neither passed in nor
136 set in the environment.
137 """
138
139 def __init__(self, project=None):
140 project = _determine_default_project(project)
141 if project is None:
142 raise ValueError('Project was not passed and could not be '
143 'determined from the environment.')
144 if not isinstance(project, six.string_types):
145 raise ValueError('Project must be a string.')
146 self.project = project
147
148
149 class JSONClient(Client, _ClientProjectMixin):
150 """Client to for Google JSON-based API.
151
152 Assumes such APIs use the ``project`` and the client needs to store this
153 value.
154
155 :type project: string
156 :param project: the project which the client acts on behalf of. If not
157 passed falls back to the default inferred from the
158 environment.
159
160 :type credentials: :class:`oauth2client.client.OAuth2Credentials` or
161 :class:`NoneType`
162 :param credentials: The OAuth2 Credentials to use for the connection
163 owned by this client. If not passed (and if no ``http``
164 object is passed), falls back to the default inferred
165 from the environment.
166
167 :type http: :class:`httplib2.Http` or class that defines ``request()``.
168 :param http: An optional HTTP object to make requests. If not passed, an
169 ``http`` object is created that is bound to the
170 ``credentials`` for the current object.
171
172 :raises: :class:`ValueError` if the project is neither passed in nor
173 set in the environment.
174 """
175
176 def __init__(self, project=None, credentials=None, http=None):
177 _ClientProjectMixin.__init__(self, project=project)
178 Client.__init__(self, credentials=credentials, http=http)
179
[end of gcloud/client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gcloud/client.py b/gcloud/client.py
--- a/gcloud/client.py
+++ b/gcloud/client.py
@@ -141,6 +141,8 @@
if project is None:
raise ValueError('Project was not passed and could not be '
'determined from the environment.')
+ if isinstance(project, six.binary_type):
+ project = project.decode('utf-8')
if not isinstance(project, six.string_types):
raise ValueError('Project must be a string.')
self.project = project
| {"golden_diff": "diff --git a/gcloud/client.py b/gcloud/client.py\n--- a/gcloud/client.py\n+++ b/gcloud/client.py\n@@ -141,6 +141,8 @@\n if project is None:\n raise ValueError('Project was not passed and could not be '\n 'determined from the environment.')\n+ if isinstance(project, six.binary_type):\n+ project = project.decode('utf-8')\n if not isinstance(project, six.string_types):\n raise ValueError('Project must be a string.')\n self.project = project\n", "issue": "Project inference is broken under Python 3.5.1\nProject inference takes place in [_determine_default_project()](https://github.com/GoogleCloudPlatform/gcloud-python/blob/91be6938b26ba9198082f457ae37fba81b8f5ea0/gcloud/_helpers.py#L189), which hands off to [_compute_engine_id()](https://github.com/GoogleCloudPlatform/gcloud-python/blob/91be6938b26ba9198082f457ae37fba81b8f5ea0/gcloud/_helpers.py#L151). That returns the correct value -- but as `bytes`. The `Client` class checks if the project value is a `str` (using `six.string_types`) and raises an error because it is not (that code is [here](https://github.com/GoogleCloudPlatform/gcloud-python/blob/91be6938b26ba9198082f457ae37fba81b8f5ea0/gcloud/client.py#L144)).\n\n", "before_files": [{"content": "# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Base classes for client used to interact with Google Cloud APIs.\"\"\"\n\nimport six\n\nfrom gcloud._helpers import _determine_default_project\nfrom gcloud.connection import Connection\nfrom gcloud.credentials import get_credentials\nfrom gcloud.credentials import get_for_service_account_json\nfrom gcloud.credentials import get_for_service_account_p12\n\n\nclass _ClientFactoryMixin(object):\n \"\"\"Mixin to allow factories that create credentials.\n\n .. note::\n\n This class is virtual.\n \"\"\"\n\n @classmethod\n def from_service_account_json(cls, json_credentials_path, *args, **kwargs):\n \"\"\"Factory to retrieve JSON credentials while creating client.\n\n :type json_credentials_path: string\n :param json_credentials_path: The path to a private key file (this file\n was given to you when you created the\n service account). 
This file must contain\n a JSON object with a private key and\n other credentials information (downloaded\n from the Google APIs console).\n\n :type args: tuple\n :param args: Remaining positional arguments to pass to constructor.\n\n :type kwargs: dict\n :param kwargs: Remaining keyword arguments to pass to constructor.\n\n :rtype: :class:`gcloud.pubsub.client.Client`\n :returns: The client created with the retrieved JSON credentials.\n :raises: :class:`TypeError` if there is a conflict with the kwargs\n and the credentials created by the factory.\n \"\"\"\n if 'credentials' in kwargs:\n raise TypeError('credentials must not be in keyword arguments')\n credentials = get_for_service_account_json(json_credentials_path)\n kwargs['credentials'] = credentials\n return cls(*args, **kwargs)\n\n @classmethod\n def from_service_account_p12(cls, client_email, private_key_path,\n *args, **kwargs):\n \"\"\"Factory to retrieve P12 credentials while creating client.\n\n .. note::\n Unless you have an explicit reason to use a PKCS12 key for your\n service account, we recommend using a JSON key.\n\n :type client_email: string\n :param client_email: The e-mail attached to the service account.\n\n :type private_key_path: string\n :param private_key_path: The path to a private key file (this file was\n given to you when you created the service\n account). This file must be in P12 format.\n\n :type args: tuple\n :param args: Remaining positional arguments to pass to constructor.\n\n :type kwargs: dict\n :param kwargs: Remaining keyword arguments to pass to constructor.\n\n :rtype: :class:`gcloud.client.Client`\n :returns: The client created with the retrieved P12 credentials.\n :raises: :class:`TypeError` if there is a conflict with the kwargs\n and the credentials created by the factory.\n \"\"\"\n if 'credentials' in kwargs:\n raise TypeError('credentials must not be in keyword arguments')\n credentials = get_for_service_account_p12(client_email,\n private_key_path)\n kwargs['credentials'] = credentials\n return cls(*args, **kwargs)\n\n\nclass Client(_ClientFactoryMixin):\n \"\"\"Client to bundle configuration needed for API requests.\n\n Assumes that the associated ``_connection_class`` only accepts\n ``http`` and ``credentials`` in its constructor.\n\n :type credentials: :class:`oauth2client.client.OAuth2Credentials` or\n :class:`NoneType`\n :param credentials: The OAuth2 Credentials to use for the connection\n owned by this client. If not passed (and if no ``http``\n object is passed), falls back to the default inferred\n from the environment.\n\n :type http: :class:`httplib2.Http` or class that defines ``request()``.\n :param http: An optional HTTP object to make requests. If not passed, an\n ``http`` object is created that is bound to the\n ``credentials`` for the current object.\n \"\"\"\n\n _connection_class = Connection\n\n def __init__(self, credentials=None, http=None):\n if credentials is None and http is None:\n credentials = get_credentials()\n self.connection = self._connection_class(\n credentials=credentials, http=http)\n\n\nclass _ClientProjectMixin(object):\n \"\"\"Mixin to allow setting the project on the client.\n\n :type project: string\n :param project: the project which the client acts on behalf of. 
If not\n passed falls back to the default inferred from the\n environment.\n\n :raises: :class:`ValueError` if the project is neither passed in nor\n set in the environment.\n \"\"\"\n\n def __init__(self, project=None):\n project = _determine_default_project(project)\n if project is None:\n raise ValueError('Project was not passed and could not be '\n 'determined from the environment.')\n if not isinstance(project, six.string_types):\n raise ValueError('Project must be a string.')\n self.project = project\n\n\nclass JSONClient(Client, _ClientProjectMixin):\n \"\"\"Client to for Google JSON-based API.\n\n Assumes such APIs use the ``project`` and the client needs to store this\n value.\n\n :type project: string\n :param project: the project which the client acts on behalf of. If not\n passed falls back to the default inferred from the\n environment.\n\n :type credentials: :class:`oauth2client.client.OAuth2Credentials` or\n :class:`NoneType`\n :param credentials: The OAuth2 Credentials to use for the connection\n owned by this client. If not passed (and if no ``http``\n object is passed), falls back to the default inferred\n from the environment.\n\n :type http: :class:`httplib2.Http` or class that defines ``request()``.\n :param http: An optional HTTP object to make requests. If not passed, an\n ``http`` object is created that is bound to the\n ``credentials`` for the current object.\n\n :raises: :class:`ValueError` if the project is neither passed in nor\n set in the environment.\n \"\"\"\n\n def __init__(self, project=None, credentials=None, http=None):\n _ClientProjectMixin.__init__(self, project=project)\n Client.__init__(self, credentials=credentials, http=http)\n", "path": "gcloud/client.py"}]} | 2,719 | 116 |
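The essence of that patch is a small normalization step ahead of the existing type check. A minimal sketch of the step in isolation; the function name is illustrative, while the checks mirror `_ClientProjectMixin.__init__`:

```
import six


def _normalize_project(project):
    """Coerce an inferred project id to text so the isinstance() check accepts it."""
    if isinstance(project, six.binary_type):
        project = project.decode('utf-8')  # GCE metadata returns bytes under Python 3
    if not isinstance(project, six.string_types):
        raise ValueError('Project must be a string.')
    return project
```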
gh_patches_debug_1484 | rasdani/github-patches | git_diff | PyGithub__PyGithub-1891 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
allow PyJWT 2+
Other libraries are moving to PyJWT 2+ as a requirement; is it possible to update PyGithub as well? Currently we can't use, for example, PyGithub together with django-social-core.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 ############################ Copyrights and license ############################
4 # #
5 # Copyright 2012 Vincent Jacques <[email protected]> #
6 # Copyright 2012 Zearin <[email protected]> #
7 # Copyright 2013 Vincent Jacques <[email protected]> #
8 # Copyright 2014 Tomas Radej <[email protected]> #
9 # Copyright 2014 Vincent Jacques <[email protected]> #
10 # Copyright 2015 Jimmy Zelinskie <[email protected]> #
11 # Copyright 2016 Felix Yan <[email protected]> #
12 # Copyright 2016 Jakub Wilk <[email protected]> #
13 # Copyright 2016 Jannis Gebauer <[email protected]> #
14 # Copyright 2016 Peter Buckley <[email protected]> #
15 # Copyright 2017 Hugo <[email protected]> #
16 # Copyright 2017 Jannis Gebauer <[email protected]> #
17 # Copyright 2017 Jannis Gebauer <[email protected]> #
18 # Copyright 2017 Nhomar Hernandez <[email protected]> #
19 # Copyright 2017 Paul Ortman <[email protected]> #
20 # Copyright 2018 Jason White <[email protected]> #
21 # Copyright 2018 Mike Miller <[email protected]> #
22 # Copyright 2018 Wan Liuyang <[email protected]> #
23 # Copyright 2018 sfdye <[email protected]> #
24 # #
25 # This file is part of PyGithub. #
26 # http://pygithub.readthedocs.io/ #
27 # #
28 # PyGithub is free software: you can redistribute it and/or modify it under #
29 # the terms of the GNU Lesser General Public License as published by the Free #
30 # Software Foundation, either version 3 of the License, or (at your option) #
31 # any later version. #
32 # #
33 # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
34 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
35 # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
36 # details. #
37 # #
38 # You should have received a copy of the GNU Lesser General Public License #
39 # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
40 # #
41 ################################################################################
42
43 import textwrap
44
45 import setuptools
46
47 version = "1.54.1"
48
49
50 if __name__ == "__main__":
51 setuptools.setup(
52 name="PyGithub",
53 version=version,
54 description="Use the full Github API v3",
55 author="Vincent Jacques",
56 author_email="[email protected]",
57 url="https://github.com/pygithub/pygithub",
58 project_urls={
59 "Documentation": "http://pygithub.readthedocs.io/en/latest/",
60 "Source": "https://github.com/pygithub/pygithub",
61 "Tracker": "https://github.com/pygithub/pygithub/issues",
62 },
63 long_description=textwrap.dedent(
64 """\
65 (Very short) Tutorial
66 =====================
67
68 First create a Github instance::
69
70 from github import Github
71
72 # using username and password
73 g = Github("user", "password")
74
75 # or using an access token
76 g = Github("access_token")
77
78 Then play with your Github objects::
79
80 for repo in g.get_user().get_repos():
81 print(repo.name)
82 repo.edit(has_wiki=False)
83
84 Reference documentation
85 =======================
86
87 See http://pygithub.readthedocs.io/en/latest/"""
88 ),
89 packages=["github"],
90 package_data={"github": ["py.typed", "*.pyi"]},
91 classifiers=[
92 "Development Status :: 5 - Production/Stable",
93 "Environment :: Web Environment",
94 "Intended Audience :: Developers",
95 "License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)",
96 "Operating System :: OS Independent",
97 "Programming Language :: Python",
98 "Programming Language :: Python :: 3",
99 "Programming Language :: Python :: 3.6",
100 "Programming Language :: Python :: 3.7",
101 "Programming Language :: Python :: 3.8",
102 "Programming Language :: Python :: 3.9",
103 "Topic :: Software Development",
104 ],
105 python_requires=">=3.6",
106 install_requires=[
107 "deprecated",
108 "pyjwt<2.0",
109 "pynacl>=1.4.0",
110 "requests>=2.14.0",
111 ],
112 extras_require={"integrations": ["cryptography"]},
113 tests_require=["cryptography", "httpretty>=1.0.3"],
114 )
115
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -105,7 +105,7 @@
python_requires=">=3.6",
install_requires=[
"deprecated",
- "pyjwt<2.0",
+ "pyjwt>=2.0",
"pynacl>=1.4.0",
"requests>=2.14.0",
],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -105,7 +105,7 @@\n python_requires=\">=3.6\",\n install_requires=[\n \"deprecated\",\n- \"pyjwt<2.0\",\n+ \"pyjwt>=2.0\",\n \"pynacl>=1.4.0\",\n \"requests>=2.14.0\",\n ],\n", "issue": "allow PyJWT 2+\nother libraries are moving to PyJWT2+ as requirement, is it possible to update pygithub as well? currently we can't use for example pygithub together with django-social-core\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2014 Tomas Radej <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2015 Jimmy Zelinskie <[email protected]> #\n# Copyright 2016 Felix Yan <[email protected]> #\n# Copyright 2016 Jakub Wilk <[email protected]> #\n# Copyright 2016 Jannis Gebauer <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2017 Hugo <[email protected]> #\n# Copyright 2017 Jannis Gebauer <[email protected]> #\n# Copyright 2017 Jannis Gebauer <[email protected]> #\n# Copyright 2017 Nhomar Hernandez <[email protected]> #\n# Copyright 2017 Paul Ortman <[email protected]> #\n# Copyright 2018 Jason White <[email protected]> #\n# Copyright 2018 Mike Miller <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. 
#\n# #\n################################################################################\n\nimport textwrap\n\nimport setuptools\n\nversion = \"1.54.1\"\n\n\nif __name__ == \"__main__\":\n setuptools.setup(\n name=\"PyGithub\",\n version=version,\n description=\"Use the full Github API v3\",\n author=\"Vincent Jacques\",\n author_email=\"[email protected]\",\n url=\"https://github.com/pygithub/pygithub\",\n project_urls={\n \"Documentation\": \"http://pygithub.readthedocs.io/en/latest/\",\n \"Source\": \"https://github.com/pygithub/pygithub\",\n \"Tracker\": \"https://github.com/pygithub/pygithub/issues\",\n },\n long_description=textwrap.dedent(\n \"\"\"\\\n (Very short) Tutorial\n =====================\n\n First create a Github instance::\n\n from github import Github\n\n # using username and password\n g = Github(\"user\", \"password\")\n\n # or using an access token\n g = Github(\"access_token\")\n\n Then play with your Github objects::\n\n for repo in g.get_user().get_repos():\n print(repo.name)\n repo.edit(has_wiki=False)\n\n Reference documentation\n =======================\n\n See http://pygithub.readthedocs.io/en/latest/\"\"\"\n ),\n packages=[\"github\"],\n package_data={\"github\": [\"py.typed\", \"*.pyi\"]},\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Software Development\",\n ],\n python_requires=\">=3.6\",\n install_requires=[\n \"deprecated\",\n \"pyjwt<2.0\",\n \"pynacl>=1.4.0\",\n \"requests>=2.14.0\",\n ],\n extras_require={\"integrations\": [\"cryptography\"]},\n tests_require=[\"cryptography\", \"httpretty>=1.0.3\"],\n )\n", "path": "setup.py"}]} | 1,936 | 96 |
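The whole change for this entry is a single requirement bump in `setup.py`. Shown here as a standalone list for brevity, the resulting pins look like this; only the PyJWT line differs from the listing above:

```
# Runtime dependencies for PyGithub 1.54.x after the change; only the PyJWT pin moved.
INSTALL_REQUIRES = [
    "deprecated",
    "pyjwt>=2.0",      # previously "pyjwt<2.0"
    "pynacl>=1.4.0",
    "requests>=2.14.0",
]
```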
gh_patches_debug_17141 | rasdani/github-patches | git_diff | ultralytics__ultralytics-3112 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
dvclive.error
### Search before asking
- [X] I have searched the YOLOv8 [issues](https://github.com/ultralytics/ultralytics/issues) and found no similar bug report.
### YOLOv8 Component
Training
### Bug
Hello,
I have used YOLOv8 several times without any problems.
Today I tried to retrain my model after updating the ultralytics packages, and when I started training I got this error:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/jupyter-moussa/.local/lib/python3.9/site-packages/ultralytics/yolo/engine/model.py", line 371, in train
self.trainer.train()
File "/home/jupyter-moussa/.local/lib/python3.9/site-packages/ultralytics/yolo/engine/trainer.py", line 192, in train
self._do_train(world_size)
File "/home/jupyter-moussa/.local/lib/python3.9/site-packages/ultralytics/yolo/engine/trainer.py", line 275, in _do_train
self._setup_train(world_size)
File "/home/jupyter-moussa/.local/lib/python3.9/site-packages/ultralytics/yolo/engine/trainer.py", line 268, in _setup_train
self.run_callbacks('on_pretrain_routine_end')
File "/home/jupyter-moussa/.local/lib/python3.9/site-packages/ultralytics/yolo/engine/trainer.py", line 165, in run_callbacks
callback(self)
File "/home/jupyter-moussa/.local/lib/python3.9/site-packages/ultralytics/yolo/utils/callbacks/dvc.py", line 76, in on_pretrain_routine_end
_log_plots(trainer.plots, 'train')
File "/home/jupyter-moussa/.local/lib/python3.9/site-packages/ultralytics/yolo/utils/callbacks/dvc.py", line 40, in _log_plots
_log_images(name, prefix)
File "/home/jupyter-moussa/.local/lib/python3.9/site-packages/ultralytics/yolo/utils/callbacks/dvc.py", line 33, in _log_images
live.log_image(os.path.join(prefix, image_path.name), image_path)
File "/home/jupyter-moussa/.local/lib/python3.9/site-packages/dvclive/live.py", line 249, in log_image
raise InvalidDataTypeError(name, type(val))
dvclive.error.InvalidDataTypeError: Data 'train/labels.jpg' has not supported type <class 'pathlib.PosixPath'>
What do you think about it?
Thank you.
### Environment
ultralytics 8.0.114
torch 2.0.1
torchaudio 0.13.1
torchvision 0.15.2
Ubuntu 22.04.1 LTS
### Minimal Reproducible Example
_No response_
### Additional
_No response_
### Are you willing to submit a PR?
- [ ] Yes I'd like to help by submitting a PR!
</issue>
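The traceback comes down to a type mismatch: `_log_images` in the listing below hands a `pathlib.PosixPath` to `dvclive.Live.log_image`, which this dvclive version rejects. The golden diff at the end of this entry addresses it by requiring dvclive >= 2.11 and disabling DVC logging otherwise; purely as an illustration of the mismatch, here is a hedged sketch that converts the path to a string before logging. This is not the approach the patch takes, and `live` is assumed to be an initialized `dvclive.Live` instance:

```
import os
from pathlib import Path


def _log_images_str(live, image_path: Path, prefix: str = '') -> None:
    """Log a plot image, handing dvclive a plain string path instead of a PosixPath."""
    name = os.path.join(prefix, image_path.name)
    live.log_image(name, str(image_path))  # str() sidesteps InvalidDataTypeError on PosixPath
```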
<code>
[start of ultralytics/yolo/utils/callbacks/dvc.py]
1 # Ultralytics YOLO 🚀, GPL-3.0 license
2 import os
3
4 from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING
5 from ultralytics.yolo.utils.torch_utils import get_flops, get_num_params
6
7 try:
8 from importlib.metadata import version
9
10 import dvclive
11
12 assert not TESTS_RUNNING # do not log pytest
13 assert version('dvclive')
14 except (ImportError, AssertionError):
15 dvclive = None
16
17 # DVCLive logger instance
18 live = None
19 _processed_plots = {}
20
21 # `on_fit_epoch_end` is called on final validation (probably need to be fixed)
22 # for now this is the way we distinguish final evaluation of the best model vs
23 # last epoch validation
24 _training_epoch = False
25
26
27 def _logger_disabled():
28 return os.getenv('ULTRALYTICS_DVC_DISABLED', 'false').lower() == 'true'
29
30
31 def _log_images(image_path, prefix=''):
32 if live:
33 live.log_image(os.path.join(prefix, image_path.name), image_path)
34
35
36 def _log_plots(plots, prefix=''):
37 for name, params in plots.items():
38 timestamp = params['timestamp']
39 if _processed_plots.get(name, None) != timestamp:
40 _log_images(name, prefix)
41 _processed_plots[name] = timestamp
42
43
44 def _log_confusion_matrix(validator):
45 targets = []
46 preds = []
47 matrix = validator.confusion_matrix.matrix
48 names = list(validator.names.values())
49 if validator.confusion_matrix.task == 'detect':
50 names += ['background']
51
52 for ti, pred in enumerate(matrix.T.astype(int)):
53 for pi, num in enumerate(pred):
54 targets.extend([names[ti]] * num)
55 preds.extend([names[pi]] * num)
56
57 live.log_sklearn_plot('confusion_matrix', targets, preds, name='cf.json', normalized=True)
58
59
60 def on_pretrain_routine_start(trainer):
61 try:
62 global live
63 if not _logger_disabled():
64 live = dvclive.Live(save_dvc_exp=True)
65 LOGGER.info(
66 'DVCLive is detected and auto logging is enabled (can be disabled with `ULTRALYTICS_DVC_DISABLED=true`).'
67 )
68 else:
69 LOGGER.debug('DVCLive is detected and auto logging is disabled via `ULTRALYTICS_DVC_DISABLED`.')
70 live = None
71 except Exception as e:
72 LOGGER.warning(f'WARNING ⚠️ DVCLive installed but not initialized correctly, not logging this run. {e}')
73
74
75 def on_pretrain_routine_end(trainer):
76 _log_plots(trainer.plots, 'train')
77
78
79 def on_train_start(trainer):
80 if live:
81 live.log_params(trainer.args)
82
83
84 def on_train_epoch_start(trainer):
85 global _training_epoch
86 _training_epoch = True
87
88
89 def on_fit_epoch_end(trainer):
90 global _training_epoch
91 if live and _training_epoch:
92 all_metrics = {**trainer.label_loss_items(trainer.tloss, prefix='train'), **trainer.metrics, **trainer.lr}
93 for metric, value in all_metrics.items():
94 live.log_metric(metric, value)
95
96 if trainer.epoch == 0:
97 model_info = {
98 'model/parameters': get_num_params(trainer.model),
99 'model/GFLOPs': round(get_flops(trainer.model), 3),
100 'model/speed(ms)': round(trainer.validator.speed['inference'], 3)}
101
102 for metric, value in model_info.items():
103 live.log_metric(metric, value, plot=False)
104
105 _log_plots(trainer.plots, 'train')
106 _log_plots(trainer.validator.plots, 'val')
107
108 live.next_step()
109 _training_epoch = False
110
111
112 def on_train_end(trainer):
113 if live:
114 # At the end log the best metrics. It runs validator on the best model internally.
115 all_metrics = {**trainer.label_loss_items(trainer.tloss, prefix='train'), **trainer.metrics, **trainer.lr}
116 for metric, value in all_metrics.items():
117 live.log_metric(metric, value, plot=False)
118
119 _log_plots(trainer.plots, 'eval')
120 _log_plots(trainer.validator.plots, 'eval')
121 _log_confusion_matrix(trainer.validator)
122
123 if trainer.best.exists():
124 live.log_artifact(trainer.best, copy=True)
125
126 live.end()
127
128
129 callbacks = {
130 'on_pretrain_routine_start': on_pretrain_routine_start,
131 'on_pretrain_routine_end': on_pretrain_routine_end,
132 'on_train_start': on_train_start,
133 'on_train_epoch_start': on_train_epoch_start,
134 'on_fit_epoch_end': on_fit_epoch_end,
135 'on_train_end': on_train_end} if dvclive else {}
136
[end of ultralytics/yolo/utils/callbacks/dvc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ultralytics/yolo/utils/callbacks/dvc.py b/ultralytics/yolo/utils/callbacks/dvc.py
--- a/ultralytics/yolo/utils/callbacks/dvc.py
+++ b/ultralytics/yolo/utils/callbacks/dvc.py
@@ -1,6 +1,8 @@
# Ultralytics YOLO 🚀, GPL-3.0 license
import os
+import pkg_resources as pkg
+
from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING
from ultralytics.yolo.utils.torch_utils import get_flops, get_num_params
@@ -10,8 +12,12 @@
import dvclive
assert not TESTS_RUNNING # do not log pytest
- assert version('dvclive')
-except (ImportError, AssertionError):
+
+ ver = version('dvclive')
+ if pkg.parse_version(ver) < pkg.parse_version('2.11.0'):
+ LOGGER.debug(f'DVCLive is detected but version {ver} is incompatible (>=2.11 required).')
+ dvclive = None # noqa: F811
+except (ImportError, AssertionError, TypeError):
dvclive = None
# DVCLive logger instance
| {"golden_diff": "diff --git a/ultralytics/yolo/utils/callbacks/dvc.py b/ultralytics/yolo/utils/callbacks/dvc.py\n--- a/ultralytics/yolo/utils/callbacks/dvc.py\n+++ b/ultralytics/yolo/utils/callbacks/dvc.py\n@@ -1,6 +1,8 @@\n # Ultralytics YOLO \ud83d\ude80, GPL-3.0 license\n import os\n \n+import pkg_resources as pkg\n+\n from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING\n from ultralytics.yolo.utils.torch_utils import get_flops, get_num_params\n \n@@ -10,8 +12,12 @@\n import dvclive\n \n assert not TESTS_RUNNING # do not log pytest\n- assert version('dvclive')\n-except (ImportError, AssertionError):\n+\n+ ver = version('dvclive')\n+ if pkg.parse_version(ver) < pkg.parse_version('2.11.0'):\n+ LOGGER.debug(f'DVCLive is detected but version {ver} is incompatible (>=2.11 required).')\n+ dvclive = None # noqa: F811\n+except (ImportError, AssertionError, TypeError):\n dvclive = None\n \n # DVCLive logger instance\n", "issue": "dvclive.error\n### Search before asking\n\n- [X] I have searched the YOLOv8 [issues](https://github.com/ultralytics/ultralytics/issues) and found no similar bug report.\n\n\n### YOLOv8 Component\n\nTraining\n\n### Bug\n\nHello,\r\nI have used several time the YOLOV8 without problem.\r\nToday, I tried to retrain my model and I updated the ultralytics packages and when I started the training, i got this error:\r\n\r\n Traceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/jupyter-moussa/.local/lib/python3.9/site-packages/ultralytics/yolo/engine/model.py\", line 371, in train\r\n self.trainer.train()\r\n File \"/home/jupyter-moussa/.local/lib/python3.9/site-packages/ultralytics/yolo/engine/trainer.py\", line 192, in train\r\n self._do_train(world_size)\r\n File \"/home/jupyter-moussa/.local/lib/python3.9/site-packages/ultralytics/yolo/engine/trainer.py\", line 275, in _do_train\r\n self._setup_train(world_size)\r\n File \"/home/jupyter-moussa/.local/lib/python3.9/site-packages/ultralytics/yolo/engine/trainer.py\", line 268, in _setup_train\r\n self.run_callbacks('on_pretrain_routine_end')\r\n File \"/home/jupyter-moussa/.local/lib/python3.9/site-packages/ultralytics/yolo/engine/trainer.py\", line 165, in run_callbacks\r\n callback(self)\r\n File \"/home/jupyter-moussa/.local/lib/python3.9/site-packages/ultralytics/yolo/utils/callbacks/dvc.py\", line 76, in on_pretrain_routine_end\r\n _log_plots(trainer.plots, 'train')\r\n File \"/home/jupyter-moussa/.local/lib/python3.9/site-packages/ultralytics/yolo/utils/callbacks/dvc.py\", line 40, in _log_plots\r\n _log_images(name, prefix)\r\n File \"/home/jupyter-moussa/.local/lib/python3.9/site-packages/ultralytics/yolo/utils/callbacks/dvc.py\", line 33, in _log_images\r\n live.log_image(os.path.join(prefix, image_path.name), image_path)\r\n File \"/home/jupyter-moussa/.local/lib/python3.9/site-packages/dvclive/live.py\", line 249, in log_image\r\n raise InvalidDataTypeError(name, type(val))\r\n dvclive.error.InvalidDataTypeError: Data 'train/labels.jpg' has not supported type <class 'pathlib.PosixPath'>\r\n\r\nwhat do you think about it?\r\n\r\nThank you.\n\n### Environment\n\nultralytics 8.0.114\r\ntorch 2.0.1\r\ntorchaudio 0.13.1\r\ntorchvision 0.15.2\r\nUbuntu 22.04.1 LTS\n\n### Minimal Reproducible Example\n\n_No response_\n\n### Additional\n\n_No response_\n\n### Are you willing to submit a PR?\n\n- [ ] Yes I'd like to help by submitting a PR!\n", "before_files": [{"content": "# Ultralytics YOLO \ud83d\ude80, GPL-3.0 license\nimport os\n\nfrom ultralytics.yolo.utils import LOGGER, 
TESTS_RUNNING\nfrom ultralytics.yolo.utils.torch_utils import get_flops, get_num_params\n\ntry:\n from importlib.metadata import version\n\n import dvclive\n\n assert not TESTS_RUNNING # do not log pytest\n assert version('dvclive')\nexcept (ImportError, AssertionError):\n dvclive = None\n\n# DVCLive logger instance\nlive = None\n_processed_plots = {}\n\n# `on_fit_epoch_end` is called on final validation (probably need to be fixed)\n# for now this is the way we distinguish final evaluation of the best model vs\n# last epoch validation\n_training_epoch = False\n\n\ndef _logger_disabled():\n return os.getenv('ULTRALYTICS_DVC_DISABLED', 'false').lower() == 'true'\n\n\ndef _log_images(image_path, prefix=''):\n if live:\n live.log_image(os.path.join(prefix, image_path.name), image_path)\n\n\ndef _log_plots(plots, prefix=''):\n for name, params in plots.items():\n timestamp = params['timestamp']\n if _processed_plots.get(name, None) != timestamp:\n _log_images(name, prefix)\n _processed_plots[name] = timestamp\n\n\ndef _log_confusion_matrix(validator):\n targets = []\n preds = []\n matrix = validator.confusion_matrix.matrix\n names = list(validator.names.values())\n if validator.confusion_matrix.task == 'detect':\n names += ['background']\n\n for ti, pred in enumerate(matrix.T.astype(int)):\n for pi, num in enumerate(pred):\n targets.extend([names[ti]] * num)\n preds.extend([names[pi]] * num)\n\n live.log_sklearn_plot('confusion_matrix', targets, preds, name='cf.json', normalized=True)\n\n\ndef on_pretrain_routine_start(trainer):\n try:\n global live\n if not _logger_disabled():\n live = dvclive.Live(save_dvc_exp=True)\n LOGGER.info(\n 'DVCLive is detected and auto logging is enabled (can be disabled with `ULTRALYTICS_DVC_DISABLED=true`).'\n )\n else:\n LOGGER.debug('DVCLive is detected and auto logging is disabled via `ULTRALYTICS_DVC_DISABLED`.')\n live = None\n except Exception as e:\n LOGGER.warning(f'WARNING \u26a0\ufe0f DVCLive installed but not initialized correctly, not logging this run. {e}')\n\n\ndef on_pretrain_routine_end(trainer):\n _log_plots(trainer.plots, 'train')\n\n\ndef on_train_start(trainer):\n if live:\n live.log_params(trainer.args)\n\n\ndef on_train_epoch_start(trainer):\n global _training_epoch\n _training_epoch = True\n\n\ndef on_fit_epoch_end(trainer):\n global _training_epoch\n if live and _training_epoch:\n all_metrics = {**trainer.label_loss_items(trainer.tloss, prefix='train'), **trainer.metrics, **trainer.lr}\n for metric, value in all_metrics.items():\n live.log_metric(metric, value)\n\n if trainer.epoch == 0:\n model_info = {\n 'model/parameters': get_num_params(trainer.model),\n 'model/GFLOPs': round(get_flops(trainer.model), 3),\n 'model/speed(ms)': round(trainer.validator.speed['inference'], 3)}\n\n for metric, value in model_info.items():\n live.log_metric(metric, value, plot=False)\n\n _log_plots(trainer.plots, 'train')\n _log_plots(trainer.validator.plots, 'val')\n\n live.next_step()\n _training_epoch = False\n\n\ndef on_train_end(trainer):\n if live:\n # At the end log the best metrics. 
It runs validator on the best model internally.\n all_metrics = {**trainer.label_loss_items(trainer.tloss, prefix='train'), **trainer.metrics, **trainer.lr}\n for metric, value in all_metrics.items():\n live.log_metric(metric, value, plot=False)\n\n _log_plots(trainer.plots, 'eval')\n _log_plots(trainer.validator.plots, 'eval')\n _log_confusion_matrix(trainer.validator)\n\n if trainer.best.exists():\n live.log_artifact(trainer.best, copy=True)\n\n live.end()\n\n\ncallbacks = {\n 'on_pretrain_routine_start': on_pretrain_routine_start,\n 'on_pretrain_routine_end': on_pretrain_routine_end,\n 'on_train_start': on_train_start,\n 'on_train_epoch_start': on_train_epoch_start,\n 'on_fit_epoch_end': on_fit_epoch_end,\n 'on_train_end': on_train_end} if dvclive else {}\n", "path": "ultralytics/yolo/utils/callbacks/dvc.py"}]} | 2,592 | 276 |
gh_patches_debug_14714 | rasdani/github-patches | git_diff | bokeh__bokeh-8466 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"CustomJS for Selections" Example in Docs Broken
In the latest version of the docs, it appears [this example]( https://bokeh.pydata.org/en/latest/docs/user_guide/interaction/callbacks.html#customjs-for-selections ) is broken. This is also true of the example in the Bokeh 1.0.0 docs. Selecting points in the plot on the left does not result in points being shown in the right plot. Compare this to [the same plot using Bokeh 0.13.0]( https://bokeh.pydata.org/en/0.13.0/docs/user_guide/interaction/callbacks.html#customjs-for-selections ), which seems to work without issues.
</issue>
<code>
[start of sphinx/source/docs/user_guide/examples/interaction_callbacks_for_selections.py]
1 from random import random
2
3 from bokeh.layouts import row
4 from bokeh.models import CustomJS, ColumnDataSource
5 from bokeh.plotting import figure, output_file, show
6
7 output_file("callback.html")
8
9 x = [random() for x in range(500)]
10 y = [random() for y in range(500)]
11
12 s1 = ColumnDataSource(data=dict(x=x, y=y))
13 p1 = figure(plot_width=400, plot_height=400, tools="lasso_select", title="Select Here")
14 p1.circle('x', 'y', source=s1, alpha=0.6)
15
16 s2 = ColumnDataSource(data=dict(x=[], y=[]))
17 p2 = figure(plot_width=400, plot_height=400, x_range=(0, 1), y_range=(0, 1),
18 tools="", title="Watch Here")
19 p2.circle('x', 'y', source=s2, alpha=0.6)
20
21 s1.callback = CustomJS(args=dict(s2=s2), code="""
22 var inds = cb_obj.selected.indices;
23 var d1 = cb_obj.data;
24 var d2 = s2.data;
25 d2['x'] = []
26 d2['y'] = []
27 for (var i = 0; i < inds.length; i++) {
28 d2['x'].push(d1['x'][inds[i]])
29 d2['y'].push(d1['y'][inds[i]])
30 }
31 s2.change.emit();
32 """)
33
34 layout = row(p1, p2)
35
36 show(layout)
37
[end of sphinx/source/docs/user_guide/examples/interaction_callbacks_for_selections.py]
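For context on why this example broke: in Bokeh 1.x selections moved to a dedicated `Selection` model, and the old `source.callback` hook no longer fires for them, so the callback has to be attached with `js_on_change` instead. A minimal sketch of that wiring, separate from the file above and assuming Bokeh 1.0 or newer:

```python
from bokeh.models import ColumnDataSource, CustomJS

source = ColumnDataSource(data=dict(x=[1, 2, 3], y=[3, 2, 1]))
callback = CustomJS(args=dict(s=source), code="""
    // cb_obj here is the Selection model; 'indices' lists the selected rows
    console.log('selected indices:', cb_obj.indices);
""")
source.selected.js_on_change('indices', callback)
```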
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sphinx/source/docs/user_guide/examples/interaction_callbacks_for_selections.py b/sphinx/source/docs/user_guide/examples/interaction_callbacks_for_selections.py
--- a/sphinx/source/docs/user_guide/examples/interaction_callbacks_for_selections.py
+++ b/sphinx/source/docs/user_guide/examples/interaction_callbacks_for_selections.py
@@ -18,9 +18,9 @@
tools="", title="Watch Here")
p2.circle('x', 'y', source=s2, alpha=0.6)
-s1.callback = CustomJS(args=dict(s2=s2), code="""
- var inds = cb_obj.selected.indices;
- var d1 = cb_obj.data;
+s1.selected.js_on_change('indices', CustomJS(args=dict(s1=s1, s2=s2), code="""
+ var inds = cb_obj.indices;
+ var d1 = s1.data;
var d2 = s2.data;
d2['x'] = []
d2['y'] = []
@@ -30,6 +30,7 @@
}
s2.change.emit();
""")
+)
layout = row(p1, p2)
| {"golden_diff": "diff --git a/sphinx/source/docs/user_guide/examples/interaction_callbacks_for_selections.py b/sphinx/source/docs/user_guide/examples/interaction_callbacks_for_selections.py\n--- a/sphinx/source/docs/user_guide/examples/interaction_callbacks_for_selections.py\n+++ b/sphinx/source/docs/user_guide/examples/interaction_callbacks_for_selections.py\n@@ -18,9 +18,9 @@\n tools=\"\", title=\"Watch Here\")\n p2.circle('x', 'y', source=s2, alpha=0.6)\n \n-s1.callback = CustomJS(args=dict(s2=s2), code=\"\"\"\n- var inds = cb_obj.selected.indices;\n- var d1 = cb_obj.data;\n+s1.selected.js_on_change('indices', CustomJS(args=dict(s1=s1, s2=s2), code=\"\"\"\n+ var inds = cb_obj.indices;\n+ var d1 = s1.data;\n var d2 = s2.data;\n d2['x'] = []\n d2['y'] = []\n@@ -30,6 +30,7 @@\n }\n s2.change.emit();\n \"\"\")\n+)\n \n layout = row(p1, p2)\n", "issue": "\"CustomJS for Selections\" Example in Docs Broken\nIn the latest version of the docs, it appears [this example]( https://bokeh.pydata.org/en/latest/docs/user_guide/interaction/callbacks.html#customjs-for-selections ) is broken. This is also true of the example in the Bokeh 1.0.0 docs. Selecting points in the plot on the left does not result in points being shown in the right plot. Compare this to [the same plot using Bokeh 0.13.0]( https://bokeh.pydata.org/en/0.13.0/docs/user_guide/interaction/callbacks.html#customjs-for-selections ), which seems to work without issues.\n", "before_files": [{"content": "from random import random\n\nfrom bokeh.layouts import row\nfrom bokeh.models import CustomJS, ColumnDataSource\nfrom bokeh.plotting import figure, output_file, show\n\noutput_file(\"callback.html\")\n\nx = [random() for x in range(500)]\ny = [random() for y in range(500)]\n\ns1 = ColumnDataSource(data=dict(x=x, y=y))\np1 = figure(plot_width=400, plot_height=400, tools=\"lasso_select\", title=\"Select Here\")\np1.circle('x', 'y', source=s1, alpha=0.6)\n\ns2 = ColumnDataSource(data=dict(x=[], y=[]))\np2 = figure(plot_width=400, plot_height=400, x_range=(0, 1), y_range=(0, 1),\n tools=\"\", title=\"Watch Here\")\np2.circle('x', 'y', source=s2, alpha=0.6)\n\ns1.callback = CustomJS(args=dict(s2=s2), code=\"\"\"\n var inds = cb_obj.selected.indices;\n var d1 = cb_obj.data;\n var d2 = s2.data;\n d2['x'] = []\n d2['y'] = []\n for (var i = 0; i < inds.length; i++) {\n d2['x'].push(d1['x'][inds[i]])\n d2['y'].push(d1['y'][inds[i]])\n }\n s2.change.emit();\n \"\"\")\n\nlayout = row(p1, p2)\n\nshow(layout)\n", "path": "sphinx/source/docs/user_guide/examples/interaction_callbacks_for_selections.py"}]} | 1,107 | 249 |
gh_patches_debug_2590 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-8120 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wafv2 logging error when using cloudtrail mode
### Describe the bug
After I disable logging on a web ACL, my wafv2 logging policy errors when it runs in cloudtrail mode on the DeleteLoggingConfiguration event.
### What did you expect to happen?
I expected the policy to match my resource.
### Cloud Provider
Amazon Web Services (AWS)
### Cloud Custodian version and dependency information
```shell
custodian version --debug
Please copy/paste the following info along with any bug reports:
Custodian: 0.9.21
Python: 3.8.0 (v3.8.0:fa919fdf25, Oct 14 2019, 10:23:27)
[Clang 6.0 (clang-600.0.57)]
Platform: posix.uname_result(sysname='Darwin', nodename='kristen-MacBook-Pro', release='21.4.0', version='Darwin Kernel Version 21.4.0: Mon Feb 21 20:35:58 PST 2022; root:xnu-8020.101.4~2/RELEASE_ARM64_T6000', machine='x86_64')
Using venv: True
Docker: False
Installed:
argcomplete==2.0.0
attrs==22.1.0
boto3==1.26.30
botocore==1.29.30
docutils==0.17.1
importlib-metadata==4.13.0
importlib-resources==5.10.1
jmespath==1.0.1
jsonschema==4.17.3
pkgutil-resolve-name==1.3.10
pyrsistent==0.19.2
python-dateutil==2.8.2
pyyaml==6.0
s3transfer==0.6.0
six==1.16.0
tabulate==0.8.10
urllib3==1.26.13
zipp==3.11.0
```
### Policy
```shell
Policy example:
- name: wafv2-log-testing
resource: aws.wafv2
mode:
role: arn:aws:iam::testing
type: cloudtrail
events:
- event: DeleteLoggingConfiguration
ids: requestParameters.resourceArn
source: wafv2.amazonaws.com
filters:
- not:
- type: logging
key: ResourceArn
value: present
```
### Relevant log/traceback output
```shell
Error when policy runs:
[ERROR] 2023-01-06T18:48:11.706Z 163a02a3-69d6-4d43-a307-365ddcb8ead7 error during policy execution
Traceback (most recent call last):
  File "/var/task/c7n/handler.py", line 165, in dispatch_event
    p.push(event, context)
  File "/var/task/c7n/policy.py", line 1288, in push
    return mode.run(event, lambda_ctx)
  File "/var/task/c7n/policy.py", line 487, in run
    resources = self.resolve_resources(event)
  File "/var/task/c7n/policy.py", line 691, in resolve_resources
    return super().resolve_resources(event)
  File "/var/task/c7n/policy.py", line 469, in resolve_resources
    resources = self.policy.resource_manager.get_resources(resource_ids)
  File "/var/task/c7n/query.py", line 576, in get_resources
    resources = self.source.get_resources(ids)
  File "/var/task/c7n/query.py", line 227, in get_resources
    return self.query.get(self.manager, ids)
  File "/var/task/c7n/query.py", line 100, in get
    resources = self.filter(resource_manager, **params)
  File "/var/task/c7n/query.py", line 79, in filter
    return self._invoke_client_enum(
  File "/var/task/c7n/query.py", line 60, in _invoke_client_enum
    data = op(**params)
  File "/var/runtime/botocore/client.py", line 391, in _api_call
    return self._make_api_call(operation_name, kwargs)
  File "/var/runtime/botocore/client.py", line 691, in _make_api_call
    request_dict = self._convert_to_request_dict(
  File "/var/runtime/botocore/client.py", line 739, in _convert_to_request_dict
    request_dict = self._serializer.serialize_to_request(
  File "/var/runtime/botocore/validate.py", line 360, in serialize_to_request
    raise ParamValidationError(report=report.generate_report())
botocore.exceptions.ParamValidationError: Parameter validation failed:
Missing required parameter in input: "Scope"
```
### Extra information or context
The pull mode version of this policy works fine.
</issue>
<code>
[start of c7n/resources/waf.py]
1 # Copyright The Cloud Custodian Authors.
2 # SPDX-License-Identifier: Apache-2.0
3 from c7n.manager import resources
4 from c7n.query import ConfigSource, QueryResourceManager, TypeInfo, DescribeSource
5 from c7n.tags import universal_augment
6 from c7n.filters import ValueFilter
7 from c7n.utils import type_schema, local_session
8
9
10 class DescribeRegionalWaf(DescribeSource):
11 def augment(self, resources):
12 resources = super().augment(resources)
13 return universal_augment(self.manager, resources)
14
15
16 class DescribeWafV2(DescribeSource):
17 def augment(self, resources):
18 return universal_augment(self.manager, resources)
19
20 # set REGIONAL for Scope as default
21 def get_query_params(self, query):
22 q = super(DescribeWafV2, self).get_query_params(query)
23 if q:
24 if 'Scope' not in q:
25 q['Scope'] = 'REGIONAL'
26 else:
27 q = {'Scope': 'REGIONAL'}
28 return q
29
30
31 @resources.register('waf')
32 class WAF(QueryResourceManager):
33
34 class resource_type(TypeInfo):
35 service = "waf"
36 enum_spec = ("list_web_acls", "WebACLs", None)
37 detail_spec = ("get_web_acl", "WebACLId", "WebACLId", "WebACL")
38 name = "Name"
39 id = "WebACLId"
40 dimension = "WebACL"
41 cfn_type = config_type = "AWS::WAF::WebACL"
42 arn_type = "webacl"
43 # override defaults to casing issues
44 permissions_enum = ('waf:ListWebACLs',)
45 permissions_augment = ('waf:GetWebACL',)
46
47
48 @resources.register('waf-regional')
49 class RegionalWAF(QueryResourceManager):
50
51 class resource_type(TypeInfo):
52 service = "waf-regional"
53 enum_spec = ("list_web_acls", "WebACLs", None)
54 detail_spec = ("get_web_acl", "WebACLId", "WebACLId", "WebACL")
55 name = "Name"
56 id = "WebACLId"
57 dimension = "WebACL"
58 cfn_type = config_type = "AWS::WAFRegional::WebACL"
59 arn_type = "webacl"
60 # override defaults to casing issues
61 permissions_enum = ('waf-regional:ListWebACLs',)
62 permissions_augment = ('waf-regional:GetWebACL',)
63 universal_taggable = object()
64
65 source_mapping = {
66 'describe': DescribeRegionalWaf,
67 'config': ConfigSource
68 }
69
70
71 @resources.register('wafv2')
72 class WAFV2(QueryResourceManager):
73
74 class resource_type(TypeInfo):
75 service = "wafv2"
76 enum_spec = ("list_web_acls", "WebACLs", None)
77 detail_spec = ("get_web_acl", "Id", "Id", "WebACL")
78 name = "Name"
79 id = "Id"
80 dimension = "WebACL"
81 cfn_type = config_type = "AWS::WAFv2::WebACL"
82 arn_type = "webacl"
83 # override defaults to casing issues
84 permissions_enum = ('wafv2:ListWebACLs',)
85 permissions_augment = ('wafv2:GetWebACL',)
86 universal_taggable = object()
87
88 source_mapping = {
89 'describe': DescribeWafV2,
90 'config': ConfigSource
91 }
92
93
94 @WAFV2.filter_registry.register('logging')
95 class WAFV2LoggingFilter(ValueFilter):
96 """
97 Filter by wafv2 logging configuration
98
99 :example:
100
101 .. code-block:: yaml
102
103 policies:
104 - name: wafv2-logging-enabled
105 resource: aws.wafv2
106 filters:
107 - not:
108 - type: logging
109 key: ResourceArn
110 value: present
111
112 - name: check-redacted-fields
113 resource: aws.wafv2
114 filters:
115 - type: logging
116 key: RedactedFields[].SingleHeader.Name
117 value: user-agent
118 op: in
119 value_type: swap
120 """
121
122 schema = type_schema('logging', rinherit=ValueFilter.schema)
123 permissions = ('wafv2:GetLoggingConfiguration', )
124 annotation_key = 'c7n:WafV2LoggingConfiguration'
125
126 def process(self, resources, event=None):
127 client = local_session(self.manager.session_factory).client(
128 'wafv2', region_name=self.manager.region)
129 logging_confs = client.list_logging_configurations(
130 Scope='REGIONAL')['LoggingConfigurations']
131 resource_map = {r['ARN']: r for r in resources}
132 for lc in logging_confs:
133 if lc['ResourceArn'] in resource_map:
134 resource_map[lc['ResourceArn']][self.annotation_key] = lc
135
136 resources = list(resource_map.values())
137
138 return [
139 r for r in resources if self.match(
140 r.get(self.annotation_key, {}))]
141
[end of c7n/resources/waf.py]
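One detail worth noting for this issue: the wafv2 `ListWebACLs` API requires a `Scope` argument, so any code path that calls it without going through `get_query_params` fails with the reported `ParamValidationError`. A standalone sketch, independent of the c7n classes in the file above:

```python
import boto3


def list_regional_web_acls(region=None):
    """List REGIONAL web ACLs; Scope is mandatory for this API call."""
    client = boto3.client("wafv2", region_name=region)
    return client.list_web_acls(Scope="REGIONAL")["WebACLs"]
```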
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/c7n/resources/waf.py b/c7n/resources/waf.py
--- a/c7n/resources/waf.py
+++ b/c7n/resources/waf.py
@@ -27,6 +27,10 @@
q = {'Scope': 'REGIONAL'}
return q
+ def get_resources(self, ids):
+ resources = self.query.filter(self.manager, **self.get_query_params(None))
+ return [r for r in resources if r[self.manager.resource_type.id] in ids]
+
@resources.register('waf')
class WAF(QueryResourceManager):
| {"golden_diff": "diff --git a/c7n/resources/waf.py b/c7n/resources/waf.py\n--- a/c7n/resources/waf.py\n+++ b/c7n/resources/waf.py\n@@ -27,6 +27,10 @@\n q = {'Scope': 'REGIONAL'}\n return q\n \n+ def get_resources(self, ids):\n+ resources = self.query.filter(self.manager, **self.get_query_params(None))\n+ return [r for r in resources if r[self.manager.resource_type.id] in ids]\n+\n \n @resources.register('waf')\n class WAF(QueryResourceManager):\n", "issue": "Wafv2 logging error when using cloudtrail mode\n### Describe the bug\n\nWhen my wafv2 logging policy runs after disabling logging I receive an error on a cloudtrail policy when using the DeleteLoggingConfiguration event.\n\n### What did you expect to happen?\n\nI expected the policy to match my resource.\n\n### Cloud Provider\n\nAmazon Web Services (AWS)\n\n### Cloud Custodian version and dependency information\n\n```shell\ncustodian version --debug\r\n\r\nPlease copy/paste the following info along with any bug reports:\r\n\r\nCustodian: 0.9.21\r\nPython: 3.8.0 (v3.8.0:fa919fdf25, Oct 14 2019, 10:23:27) \r\n [Clang 6.0 (clang-600.0.57)]\r\nPlatform: posix.uname_result(sysname='Darwin', nodename='kristen-MacBook-Pro', release='21.4.0', version='Darwin Kernel Version 21.4.0: Mon Feb 21 20:35:58 PST 2022; root:xnu-8020.101.4~2/RELEASE_ARM64_T6000', machine='x86_64')\r\nUsing venv: True\r\nDocker: False\r\nInstalled: \r\n\r\nargcomplete==2.0.0\r\nattrs==22.1.0\r\nboto3==1.26.30\r\nbotocore==1.29.30\r\ndocutils==0.17.1\r\nimportlib-metadata==4.13.0\r\nimportlib-resources==5.10.1\r\njmespath==1.0.1\r\njsonschema==4.17.3\r\npkgutil-resolve-name==1.3.10\r\npyrsistent==0.19.2\r\npython-dateutil==2.8.2\r\npyyaml==6.0\r\ns3transfer==0.6.0\r\nsix==1.16.0\r\ntabulate==0.8.10\r\nurllib3==1.26.13\r\nzipp==3.11.0\n```\n\n\n### Policy\n\n```shell\nPolicy example:\r\n\r\n- name: wafv2-log-testing\r\n resource: aws.wafv2\r\n mode:\r\n role: arn:aws:iam::testing\r\n type: cloudtrail\r\n events: \r\n - event: DeleteLoggingConfiguration\r\n ids: requestParameters.resourceArn\r\n source: wafv2.amazonaws.com\r\n filters: \r\n - not:\r\n - type: logging\r\n key: ResourceArn\r\n value: present\n```\n\n\n### Relevant log/traceback output\n\n```shell\nError when policy runs:\r\n\r\n\r\n[ERROR]\t2023-01-06T18:48:11.706Z\t163a02a3-69d6-4d43-a307-365ddcb8ead7\terror during policy executionTraceback (most recent call last): File \"/var/task/c7n/handler.py\", line 165, in dispatch_event p.push(event, context) File \"/var/task/c7n/policy.py\", line 1288, in push return mode.run(event, lambda_ctx) File \"/var/task/c7n/policy.py\", line 487, in run resources = self.resolve_resources(event) File \"/var/task/c7n/policy.py\", line 691, in resolve_resources return super().resolve_resources(event) File \"/var/task/c7n/policy.py\", line 469, in resolve_resources resources = self.policy.resource_manager.get_resources(resource_ids) File \"/var/task/c7n/query.py\", line 576, in get_resources resources = self.source.get_resources(ids) File \"/var/task/c7n/query.py\", line 227, in get_resources return self.query.get(self.manager, ids) File \"/var/task/c7n/query.py\", line 100, in get resources = self.filter(resource_manager, **params) File \"/var/task/c7n/query.py\", line 79, in filter return self._invoke_client_enum( File \"/var/task/c7n/query.py\", line 60, in _invoke_client_enum data = op(**params) File \"/var/runtime/botocore/client.py\", line 391, in _api_call return self._make_api_call(operation_name, kwargs) File \"/var/runtime/botocore/client.py\", line 691, in 
_make_api_call request_dict = self._convert_to_request_dict( File \"/var/runtime/botocore/client.py\", line 739, in _convert_to_request_dict request_dict = self._serializer.serialize_to_request( File \"/var/runtime/botocore/validate.py\", line 360, in serialize_to_request raise ParamValidationError(report=report.generate_report())botocore.exceptions.ParamValidationError: Parameter validation failed:Missing required parameter in input: \"Scope\" | [ERROR] 2023-01-06T18:48:11.706Z 163a02a3-69d6-4d43-a307-365ddcb8ead7 error during policy execution Traceback (most recent call last): File \"/var/task/c7n/handler.py\", line 165, in dispatch_event p.push(event, context) File \"/var/task/c7n/policy.py\", line 1288, in push return mode.run(event, lambda_ctx) File \"/var/task/c7n/policy.py\", line 487, in run resources = self.resolve_resources(event) File \"/var/task/c7n/policy.py\", line 691, in resolve_resources return super().resolve_resources(event) File \"/var/task/c7n/policy.py\", line 469, in resolve_resources resources = self.policy.resource_manager.get_resources(resource_ids) File \"/var/task/c7n/query.py\", line 576, in get_resources resources = self.source.get_resources(ids) File \"/var/task/c7n/query.py\", line 227, in get_resources return self.query.get(self.manager, ids) File \"/var/task/c7n/query.py\", line 100, in get resources = self.filter(resource_manager, **params) File \"/var/task/c7n/query.py\", line 79, in filter return self._invoke_client_enum( File \"/var/task/c7n/query.py\", line 60, in _invoke_client_enum data = op(**params) File \"/var/runtime/botocore/client.py\", line 391, in _api_call return self._make_api_call(operation_name, kwargs) File \"/var/runtime/botocore/client.py\", line 691, in _make_api_call request_dict = self._convert_to_request_dict( File \"/var/runtime/botocore/client.py\", line 739, in _convert_to_request_dict request_dict = self._serializer.serialize_to_request( File \"/var/runtime/botocore/validate.py\", line 360, in serialize_to_request raise ParamValidationError(report=report.generate_report()) botocore.exceptions.ParamValidationError: Parameter validation failed: Missing required parameter in input: \"Scope\"\r\n-- | --\n```\n\n\n### Extra information or context\n\nThe pull mode version of this policy works fine.\n", "before_files": [{"content": "# Copyright The Cloud Custodian Authors.\n# SPDX-License-Identifier: Apache-2.0\nfrom c7n.manager import resources\nfrom c7n.query import ConfigSource, QueryResourceManager, TypeInfo, DescribeSource\nfrom c7n.tags import universal_augment\nfrom c7n.filters import ValueFilter\nfrom c7n.utils import type_schema, local_session\n\n\nclass DescribeRegionalWaf(DescribeSource):\n def augment(self, resources):\n resources = super().augment(resources)\n return universal_augment(self.manager, resources)\n\n\nclass DescribeWafV2(DescribeSource):\n def augment(self, resources):\n return universal_augment(self.manager, resources)\n\n # set REGIONAL for Scope as default\n def get_query_params(self, query):\n q = super(DescribeWafV2, self).get_query_params(query)\n if q:\n if 'Scope' not in q:\n q['Scope'] = 'REGIONAL'\n else:\n q = {'Scope': 'REGIONAL'}\n return q\n\n\[email protected]('waf')\nclass WAF(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = \"waf\"\n enum_spec = (\"list_web_acls\", \"WebACLs\", None)\n detail_spec = (\"get_web_acl\", \"WebACLId\", \"WebACLId\", \"WebACL\")\n name = \"Name\"\n id = \"WebACLId\"\n dimension = \"WebACL\"\n cfn_type = config_type = \"AWS::WAF::WebACL\"\n 
arn_type = \"webacl\"\n # override defaults to casing issues\n permissions_enum = ('waf:ListWebACLs',)\n permissions_augment = ('waf:GetWebACL',)\n\n\[email protected]('waf-regional')\nclass RegionalWAF(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = \"waf-regional\"\n enum_spec = (\"list_web_acls\", \"WebACLs\", None)\n detail_spec = (\"get_web_acl\", \"WebACLId\", \"WebACLId\", \"WebACL\")\n name = \"Name\"\n id = \"WebACLId\"\n dimension = \"WebACL\"\n cfn_type = config_type = \"AWS::WAFRegional::WebACL\"\n arn_type = \"webacl\"\n # override defaults to casing issues\n permissions_enum = ('waf-regional:ListWebACLs',)\n permissions_augment = ('waf-regional:GetWebACL',)\n universal_taggable = object()\n\n source_mapping = {\n 'describe': DescribeRegionalWaf,\n 'config': ConfigSource\n }\n\n\[email protected]('wafv2')\nclass WAFV2(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = \"wafv2\"\n enum_spec = (\"list_web_acls\", \"WebACLs\", None)\n detail_spec = (\"get_web_acl\", \"Id\", \"Id\", \"WebACL\")\n name = \"Name\"\n id = \"Id\"\n dimension = \"WebACL\"\n cfn_type = config_type = \"AWS::WAFv2::WebACL\"\n arn_type = \"webacl\"\n # override defaults to casing issues\n permissions_enum = ('wafv2:ListWebACLs',)\n permissions_augment = ('wafv2:GetWebACL',)\n universal_taggable = object()\n\n source_mapping = {\n 'describe': DescribeWafV2,\n 'config': ConfigSource\n }\n\n\[email protected]_registry.register('logging')\nclass WAFV2LoggingFilter(ValueFilter):\n \"\"\"\n Filter by wafv2 logging configuration\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: wafv2-logging-enabled\n resource: aws.wafv2\n filters:\n - not:\n - type: logging\n key: ResourceArn\n value: present\n\n - name: check-redacted-fields\n resource: aws.wafv2\n filters:\n - type: logging\n key: RedactedFields[].SingleHeader.Name\n value: user-agent\n op: in\n value_type: swap\n \"\"\"\n\n schema = type_schema('logging', rinherit=ValueFilter.schema)\n permissions = ('wafv2:GetLoggingConfiguration', )\n annotation_key = 'c7n:WafV2LoggingConfiguration'\n\n def process(self, resources, event=None):\n client = local_session(self.manager.session_factory).client(\n 'wafv2', region_name=self.manager.region)\n logging_confs = client.list_logging_configurations(\n Scope='REGIONAL')['LoggingConfigurations']\n resource_map = {r['ARN']: r for r in resources}\n for lc in logging_confs:\n if lc['ResourceArn'] in resource_map:\n resource_map[lc['ResourceArn']][self.annotation_key] = lc\n\n resources = list(resource_map.values())\n\n return [\n r for r in resources if self.match(\n r.get(self.annotation_key, {}))]\n", "path": "c7n/resources/waf.py"}]} | 3,586 | 128 |
gh_patches_debug_12317 | rasdani/github-patches | git_diff | ansible-collections__community.aws-283 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ec2_win_password returns success when it fails to decode the password
### SUMMARY
An unsuccessful decode call returns:
```
ok: [localhost] => {
"changed": false,
"invocation": {
"module_args": {
[trimmed]
}
},
"win_password": ""
}
```
I would expect it to return a failure state
</issue>
<code>
[start of plugins/modules/ec2_win_password.py]
1 #!/usr/bin/python
2 # Copyright: Ansible Project
3 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
4
5 from __future__ import absolute_import, division, print_function
6 __metaclass__ = type
7
8
9 DOCUMENTATION = '''
10 ---
11 module: ec2_win_password
12 version_added: 1.0.0
13 short_description: Gets the default administrator password for ec2 windows instances
14 description:
15 - Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. C(i-XXXXXXX)).
16 - This module has a dependency on python-boto.
17 author: "Rick Mendes (@rickmendes)"
18 options:
19 instance_id:
20 description:
21 - The instance id to get the password data from.
22 required: true
23 type: str
24 key_file:
25 description:
26 - Path to the file containing the key pair used on the instance.
27 - Conflicts with I(key_data).
28 required: false
29 type: path
30 key_data:
31 description:
32 - The private key (usually stored in vault).
33 - Conflicts with I(key_file),
34 required: false
35 type: str
36 key_passphrase:
37 description:
38 - The passphrase for the instance key pair. The key must use DES or 3DES encryption for this module to decrypt it. You can use openssl to
39 convert your password protected keys if they do not use DES or 3DES. ex) C(openssl rsa -in current_key -out new_key -des3).
40 type: str
41 wait:
42 description:
43 - Whether or not to wait for the password to be available before returning.
44 type: bool
45 default: false
46 wait_timeout:
47 description:
48 - Number of seconds to wait before giving up.
49 default: 120
50 type: int
51
52 extends_documentation_fragment:
53 - amazon.aws.aws
54 - amazon.aws.ec2
55
56
57 requirements:
58 - cryptography
59
60 notes:
61 - As of Ansible 2.4, this module requires the python cryptography module rather than the
62 older pycrypto module.
63 '''
64
65 EXAMPLES = '''
66 # Example of getting a password
67 - name: get the Administrator password
68 community.aws.ec2_win_password:
69 profile: my-boto-profile
70 instance_id: i-XXXXXX
71 region: us-east-1
72 key_file: "~/aws-creds/my_test_key.pem"
73
74 # Example of getting a password using a variable
75 - name: get the Administrator password
76 community.aws.ec2_win_password:
77 profile: my-boto-profile
78 instance_id: i-XXXXXX
79 region: us-east-1
80 key_data: "{{ ec2_private_key }}"
81
82 # Example of getting a password with a password protected key
83 - name: get the Administrator password
84 community.aws.ec2_win_password:
85 profile: my-boto-profile
86 instance_id: i-XXXXXX
87 region: us-east-1
88 key_file: "~/aws-creds/my_protected_test_key.pem"
89 key_passphrase: "secret"
90
91 # Example of waiting for a password
92 - name: get the Administrator password
93 community.aws.ec2_win_password:
94 profile: my-boto-profile
95 instance_id: i-XXXXXX
96 region: us-east-1
97 key_file: "~/aws-creds/my_test_key.pem"
98 wait: yes
99 wait_timeout: 45
100 '''
101
102 import datetime
103 import time
104 from base64 import b64decode
105
106 try:
107 from cryptography.hazmat.backends import default_backend
108 from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
109 from cryptography.hazmat.primitives.serialization import load_pem_private_key
110 HAS_CRYPTOGRAPHY = True
111 except ImportError:
112 HAS_CRYPTOGRAPHY = False
113
114 from ansible.module_utils._text import to_bytes
115
116 from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
117 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO
118 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ec2_connect
119
120
121 def setup_module_object():
122 argument_spec = dict(
123 instance_id=dict(required=True),
124 key_file=dict(required=False, default=None, type='path'),
125 key_passphrase=dict(no_log=True, default=None, required=False),
126 key_data=dict(no_log=True, default=None, required=False),
127 wait=dict(type='bool', default=False, required=False),
128 wait_timeout=dict(default=120, required=False, type='int'),
129 )
130 module = AnsibleAWSModule(argument_spec=argument_spec)
131 return module
132
133
134 def ec2_win_password(module):
135 instance_id = module.params.get('instance_id')
136 key_file = module.params.get('key_file')
137 if module.params.get('key_passphrase') is None:
138 b_key_passphrase = None
139 else:
140 b_key_passphrase = to_bytes(module.params.get('key_passphrase'), errors='surrogate_or_strict')
141 if module.params.get('key_data') is None:
142 b_key_data = None
143 else:
144 b_key_data = to_bytes(module.params.get('key_data'), errors='surrogate_or_strict')
145 wait = module.params.get('wait')
146 wait_timeout = module.params.get('wait_timeout')
147
148 ec2 = ec2_connect(module)
149
150 if wait:
151 start = datetime.datetime.now()
152 end = start + datetime.timedelta(seconds=wait_timeout)
153
154 while datetime.datetime.now() < end:
155 data = ec2.get_password_data(instance_id)
156 decoded = b64decode(data)
157 if not decoded:
158 time.sleep(5)
159 else:
160 break
161 else:
162 data = ec2.get_password_data(instance_id)
163 decoded = b64decode(data)
164
165 if wait and datetime.datetime.now() >= end:
166 module.fail_json(msg="wait for password timeout after %d seconds" % wait_timeout)
167
168 if key_file is not None and b_key_data is None:
169 try:
170 with open(key_file, 'rb') as f:
171 key = load_pem_private_key(f.read(), b_key_passphrase, default_backend())
172 except IOError as e:
173 # Handle bad files
174 module.fail_json(msg="I/O error (%d) opening key file: %s" % (e.errno, e.strerror))
175 except (ValueError, TypeError) as e:
176 # Handle issues loading key
177 module.fail_json(msg="unable to parse key file")
178 elif b_key_data is not None and key_file is None:
179 try:
180 key = load_pem_private_key(b_key_data, b_key_passphrase, default_backend())
181 except (ValueError, TypeError) as e:
182 module.fail_json(msg="unable to parse key data")
183
184 try:
185 decrypted = key.decrypt(decoded, PKCS1v15())
186 except ValueError as e:
187 decrypted = None
188
189 if decrypted is None:
190 module.exit_json(win_password='', changed=False)
191 else:
192 if wait:
193 elapsed = datetime.datetime.now() - start
194 module.exit_json(win_password=decrypted, changed=True, elapsed=elapsed.seconds)
195 else:
196 module.exit_json(win_password=decrypted, changed=True)
197
198
199 def main():
200 module = setup_module_object()
201
202 if not HAS_BOTO:
203 module.fail_json(msg='Boto required for this module.')
204
205 if not HAS_CRYPTOGRAPHY:
206 module.fail_json(msg='cryptography package required for this module.')
207
208 ec2_win_password(module)
209
210
211 if __name__ == '__main__':
212 main()
213
[end of plugins/modules/ec2_win_password.py]
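For reference, the decryption step at the centre of this issue can be exercised on its own. Below is a minimal sketch, a standalone helper that is not part of the module above and assumes a recent `cryptography` release, which surfaces an undecryptable blob as an error instead of an empty string:

```python
from base64 import b64decode

from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
from cryptography.hazmat.primitives.serialization import load_pem_private_key


def decrypt_ec2_password(password_data_b64, pem_key_bytes, passphrase=None):
    key = load_pem_private_key(pem_key_bytes, passphrase)
    try:
        # EC2 encrypts the password with the key pair's public key (PKCS#1 v1.5)
        return key.decrypt(b64decode(password_data_b64), PKCS1v15()).decode("utf-8")
    except ValueError as exc:
        raise RuntimeError("unable to decrypt EC2 password data") from exc
```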
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugins/modules/ec2_win_password.py b/plugins/modules/ec2_win_password.py
--- a/plugins/modules/ec2_win_password.py
+++ b/plugins/modules/ec2_win_password.py
@@ -187,13 +187,13 @@
decrypted = None
if decrypted is None:
- module.exit_json(win_password='', changed=False)
+ module.fail_json(msg="unable to decrypt password", win_password='', changed=False)
else:
if wait:
elapsed = datetime.datetime.now() - start
- module.exit_json(win_password=decrypted, changed=True, elapsed=elapsed.seconds)
+ module.exit_json(win_password=decrypted, changed=False, elapsed=elapsed.seconds)
else:
- module.exit_json(win_password=decrypted, changed=True)
+ module.exit_json(win_password=decrypted, changed=False)
def main():
| {"golden_diff": "diff --git a/plugins/modules/ec2_win_password.py b/plugins/modules/ec2_win_password.py\n--- a/plugins/modules/ec2_win_password.py\n+++ b/plugins/modules/ec2_win_password.py\n@@ -187,13 +187,13 @@\n decrypted = None\n \n if decrypted is None:\n- module.exit_json(win_password='', changed=False)\n+ module.fail_json(msg=\"unable to decrypt password\", win_password='', changed=False)\n else:\n if wait:\n elapsed = datetime.datetime.now() - start\n- module.exit_json(win_password=decrypted, changed=True, elapsed=elapsed.seconds)\n+ module.exit_json(win_password=decrypted, changed=False, elapsed=elapsed.seconds)\n else:\n- module.exit_json(win_password=decrypted, changed=True)\n+ module.exit_json(win_password=decrypted, changed=False)\n \n \n def main():\n", "issue": "ec2_win_password returns success when it fails to decode the password\n### SUMMARY\r\nAn unsuccessful decode call returns:\r\n\r\n```\r\nok: [localhost] => {\r\n \"changed\": false,\r\n \"invocation\": {\r\n \"module_args\": {\r\n [trimmed]\r\n }\r\n },\r\n \"win_password\": \"\"\r\n }\r\n```\r\n\r\nI would expect it to return a failure state\n", "before_files": [{"content": "#!/usr/bin/python\n# Copyright: Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\n---\nmodule: ec2_win_password\nversion_added: 1.0.0\nshort_description: Gets the default administrator password for ec2 windows instances\ndescription:\n - Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. C(i-XXXXXXX)).\n - This module has a dependency on python-boto.\nauthor: \"Rick Mendes (@rickmendes)\"\noptions:\n instance_id:\n description:\n - The instance id to get the password data from.\n required: true\n type: str\n key_file:\n description:\n - Path to the file containing the key pair used on the instance.\n - Conflicts with I(key_data).\n required: false\n type: path\n key_data:\n description:\n - The private key (usually stored in vault).\n - Conflicts with I(key_file),\n required: false\n type: str\n key_passphrase:\n description:\n - The passphrase for the instance key pair. The key must use DES or 3DES encryption for this module to decrypt it. You can use openssl to\n convert your password protected keys if they do not use DES or 3DES. 
ex) C(openssl rsa -in current_key -out new_key -des3).\n type: str\n wait:\n description:\n - Whether or not to wait for the password to be available before returning.\n type: bool\n default: false\n wait_timeout:\n description:\n - Number of seconds to wait before giving up.\n default: 120\n type: int\n\nextends_documentation_fragment:\n- amazon.aws.aws\n- amazon.aws.ec2\n\n\nrequirements:\n - cryptography\n\nnotes:\n - As of Ansible 2.4, this module requires the python cryptography module rather than the\n older pycrypto module.\n'''\n\nEXAMPLES = '''\n# Example of getting a password\n- name: get the Administrator password\n community.aws.ec2_win_password:\n profile: my-boto-profile\n instance_id: i-XXXXXX\n region: us-east-1\n key_file: \"~/aws-creds/my_test_key.pem\"\n\n# Example of getting a password using a variable\n- name: get the Administrator password\n community.aws.ec2_win_password:\n profile: my-boto-profile\n instance_id: i-XXXXXX\n region: us-east-1\n key_data: \"{{ ec2_private_key }}\"\n\n# Example of getting a password with a password protected key\n- name: get the Administrator password\n community.aws.ec2_win_password:\n profile: my-boto-profile\n instance_id: i-XXXXXX\n region: us-east-1\n key_file: \"~/aws-creds/my_protected_test_key.pem\"\n key_passphrase: \"secret\"\n\n# Example of waiting for a password\n- name: get the Administrator password\n community.aws.ec2_win_password:\n profile: my-boto-profile\n instance_id: i-XXXXXX\n region: us-east-1\n key_file: \"~/aws-creds/my_test_key.pem\"\n wait: yes\n wait_timeout: 45\n'''\n\nimport datetime\nimport time\nfrom base64 import b64decode\n\ntry:\n from cryptography.hazmat.backends import default_backend\n from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15\n from cryptography.hazmat.primitives.serialization import load_pem_private_key\n HAS_CRYPTOGRAPHY = True\nexcept ImportError:\n HAS_CRYPTOGRAPHY = False\n\nfrom ansible.module_utils._text import to_bytes\n\nfrom ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule\nfrom ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO\nfrom ansible_collections.amazon.aws.plugins.module_utils.ec2 import ec2_connect\n\n\ndef setup_module_object():\n argument_spec = dict(\n instance_id=dict(required=True),\n key_file=dict(required=False, default=None, type='path'),\n key_passphrase=dict(no_log=True, default=None, required=False),\n key_data=dict(no_log=True, default=None, required=False),\n wait=dict(type='bool', default=False, required=False),\n wait_timeout=dict(default=120, required=False, type='int'),\n )\n module = AnsibleAWSModule(argument_spec=argument_spec)\n return module\n\n\ndef ec2_win_password(module):\n instance_id = module.params.get('instance_id')\n key_file = module.params.get('key_file')\n if module.params.get('key_passphrase') is None:\n b_key_passphrase = None\n else:\n b_key_passphrase = to_bytes(module.params.get('key_passphrase'), errors='surrogate_or_strict')\n if module.params.get('key_data') is None:\n b_key_data = None\n else:\n b_key_data = to_bytes(module.params.get('key_data'), errors='surrogate_or_strict')\n wait = module.params.get('wait')\n wait_timeout = module.params.get('wait_timeout')\n\n ec2 = ec2_connect(module)\n\n if wait:\n start = datetime.datetime.now()\n end = start + datetime.timedelta(seconds=wait_timeout)\n\n while datetime.datetime.now() < end:\n data = ec2.get_password_data(instance_id)\n decoded = b64decode(data)\n if not decoded:\n time.sleep(5)\n else:\n break\n 
else:\n data = ec2.get_password_data(instance_id)\n decoded = b64decode(data)\n\n if wait and datetime.datetime.now() >= end:\n module.fail_json(msg=\"wait for password timeout after %d seconds\" % wait_timeout)\n\n if key_file is not None and b_key_data is None:\n try:\n with open(key_file, 'rb') as f:\n key = load_pem_private_key(f.read(), b_key_passphrase, default_backend())\n except IOError as e:\n # Handle bad files\n module.fail_json(msg=\"I/O error (%d) opening key file: %s\" % (e.errno, e.strerror))\n except (ValueError, TypeError) as e:\n # Handle issues loading key\n module.fail_json(msg=\"unable to parse key file\")\n elif b_key_data is not None and key_file is None:\n try:\n key = load_pem_private_key(b_key_data, b_key_passphrase, default_backend())\n except (ValueError, TypeError) as e:\n module.fail_json(msg=\"unable to parse key data\")\n\n try:\n decrypted = key.decrypt(decoded, PKCS1v15())\n except ValueError as e:\n decrypted = None\n\n if decrypted is None:\n module.exit_json(win_password='', changed=False)\n else:\n if wait:\n elapsed = datetime.datetime.now() - start\n module.exit_json(win_password=decrypted, changed=True, elapsed=elapsed.seconds)\n else:\n module.exit_json(win_password=decrypted, changed=True)\n\n\ndef main():\n module = setup_module_object()\n\n if not HAS_BOTO:\n module.fail_json(msg='Boto required for this module.')\n\n if not HAS_CRYPTOGRAPHY:\n module.fail_json(msg='cryptography package required for this module.')\n\n ec2_win_password(module)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/ec2_win_password.py"}]} | 2,784 | 186 |
gh_patches_debug_27286 | rasdani/github-patches | git_diff | psychopy__psychopy-739 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
gui import from psychopy not working
Hi all,
I'm trying to run a PsychoPy script from the terminal, but I get this error:
Traceback (most recent call last):
File "nf_test_lastrun.py", line 11, in <module>
from psychopy import visual, core, data, event, logging, sound, gui
File "/Library/Python/2.7/site-packages/PsychoPy-1.81.00-py2.7.egg/psychopy/gui.py", line 11, in <module>
from psychopy.app import localization
File "/Library/Python/2.7/site-packages/PsychoPy-1.81.00-py2.7.egg/psychopy/app/localization/**init**.py", line 89, in <module>
languageID, lang = getID()
File "/Library/Python/2.7/site-packages/PsychoPy-1.81.00-py2.7.egg/psychopy/app/localization/**init**.py", line 78, in getID
val = codeFromWxId[wx.LANGUAGE_DEFAULT]
KeyError: 0
When I open Python and try the imports interactively, everything works except gui.
Any suggestions?
Thanks,
clemens
</issue>
<code>
[start of psychopy/app/localization/__init__.py]
1 #!/usr/bin/env python2
2 # -*- coding: utf-8 -*-
3
4 """Language localization for PsychoPy.
5
6 Sets the locale value as a wx languageID (int) and initializes gettext translation _translate():
7 from psychopy.app import localization
8 """
9
10 # Part of the PsychoPy library
11 # Copyright (C) 2014 Jonathan Peirce
12 # Distributed under the terms of the GNU General Public License (GPL).
13
14 # Author: Jeremy Gray, July 2014
15
16
17 import gettext
18 import os, sys, glob, codecs
19 from psychopy import logging, prefs
20
21 import wx
22
23 # need a wx App for wx.Locale:
24 try:
25 wx.Dialog(None, -1)
26 except wx._core.PyNoAppError:
27 if wx.version() < '2.9':
28 tmpApp = wx.PySimpleApp()
29 else:
30 tmpApp = wx.App(False)
31
32 # Get a dict of locale aliases from wx.Locale() -- same cross-platform (Win 7, Mac 10.9)
33 locale = wx.Locale()
34 aliases = {}
35 wxIdFromCode = {} # int: 0, 2-229
36 codeFromWxId = {} # used in directory names e.g. ja_JP; never JPN ala Windows
37 winmap = {} # get windows 3-letter code (=val) from canonical form (=key); use only for setting locale (non-wx)
38 locname = {} # descriptive name, if available; 5-letter code if not
39 reverseMap = {}
40
41 for i in range(230):
42 info = locale.GetLanguageInfo(i)
43 if info:
44 aliases[info.Description] = info.CanonicalName # mix of forms: ja or ja_JP
45 wxIdFromCode[info.CanonicalName] = i
46 codeFromWxId[i] = info.CanonicalName
47
48 mappings = os.path.join(os.path.dirname(__file__), 'mappings.txt')
49 for line in codecs.open(mappings, 'rU', 'utf8').readlines():
50 try:
51 can, win, name = line.strip().split(' ', 2) # canonical, windows, name-with-spaces
52 except ValueError:
53 can, win = line.strip().split(' ', 1)
54 name = can
55 winmap[can] = win
56 locname[can] = name
57 reverseMap[name] = can
58
59 # what are the available translations? available languages on the OS?
60 expr = os.path.join(os.path.dirname(__file__), '..', 'locale', '*')
61 available = sorted(map(os.path.basename, glob.glob(expr)))
62 sysAvail = [str(l) for l in codeFromWxId.values() # installed language packs
63 if l and locale.IsAvailable(wxIdFromCode[l])]
64
65 def getID(lang=None):
66 """Get wx ID of language to use for translations: `lang`, pref, or system default.
67
68 `lang` is a 5 char `language_REGION`, eg ja_JP
69 """
70 if lang:
71 val = lang
72 else:
73 try:
74 val = prefs.app['locale']
75 except KeyError:
76 val = locale.GetLocale() # wx.Locale, no encoding
77 if not val:
78 val = codeFromWxId[wx.LANGUAGE_DEFAULT]
79 try:
80 # out-dated: [can't set wx.Locale here because no app yet] now there is an app
81 # here just determine the value to be used when it can be set
82 language = wxIdFromCode[val]
83 except KeyError:
84 logging.error('locale %s not known to wx.Locale, using default' % val)
85 language = wx.LANGUAGE_DEFAULT
86
87 return language, val
88
89 languageID, lang = getID()
90 #use lang like this:
91 #import locale -- the non-wx version of locale
92 #
93 #if sys.platform.startswith('win'):
94 # v = winmap[val]
95 #else: v=val
96 #locale.setlocale(locale.LC_ALL, (v, 'UTF-8'))
97
98 # set locale before splash screen:
99 if locale.IsAvailable(languageID):
100 wxlocale = wx.Locale(languageID)
101 else:
102 wxlocale = wx.Locale(wx.LANGUAGE_DEFAULT)
103
104 # ideally rewrite the following using wxlocale only:
105 path = os.path.join(os.path.dirname(__file__), '..', 'locale', lang, 'LC_MESSAGE') + os.sep
106 mofile = os.path.join(path, 'messages.mo')
107 try:
108 logging.debug("Opening message catalog %s for locale %s" % (mofile, lang))
109 trans = gettext.GNUTranslations(open(mofile, "rb"))
110 except IOError:
111 logging.debug("Locale for '%s' not found. Using default." % lang)
112 trans = gettext.NullTranslations()
113 trans.install(unicode=True)
114
115 # to avoid a crash, PsychoPy app uses a nonstandard name _translate instead of _
116 # seems like a var in a dependency is named _, clobbering _ as global translation:
117 __builtins__['_translate'] = _
118 del(__builtins__['_']) # idea: force psychopy code to use _translate
119
120
121 #__builtins__['_'] = wx.GetTranslation
122 # this seems to have no effect, needs more investigation:
123 #path = os.path.join(os.path.dirname(__file__), '..', 'locale', lang, 'LC_MESSAGE') + os.sep
124 #wxlocale.AddCatalogLookupPathPrefix(path)
125
[end of psychopy/app/localization/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/psychopy/app/localization/__init__.py b/psychopy/app/localization/__init__.py
--- a/psychopy/app/localization/__init__.py
+++ b/psychopy/app/localization/__init__.py
@@ -31,13 +31,10 @@
# Get a dict of locale aliases from wx.Locale() -- same cross-platform (Win 7, Mac 10.9)
locale = wx.Locale()
-aliases = {}
-wxIdFromCode = {} # int: 0, 2-229
-codeFromWxId = {} # used in directory names e.g. ja_JP; never JPN ala Windows
-winmap = {} # get windows 3-letter code (=val) from canonical form (=key); use only for setting locale (non-wx)
-locname = {} # descriptive name, if available; 5-letter code if not
-reverseMap = {}
-
+aliases = {u'English (U.S.)': 'en_US'}
+# set defaults because locale.GetLanguageInfo(0) can return None on some systems:
+wxIdFromCode = {'en_US': wx.LANGUAGE_DEFAULT} # int: 0 default, 2-229
+codeFromWxId = {wx.LANGUAGE_DEFAULT: 'en_US'} # used in directory names e.g. ja_JP; never JPN ala Windows
for i in range(230):
info = locale.GetLanguageInfo(i)
if info:
@@ -45,6 +42,10 @@
wxIdFromCode[info.CanonicalName] = i
codeFromWxId[i] = info.CanonicalName
+# read all known mappings cross-platform from a file:
+winmap = {'en_US': 'ENU'} # get windows 3-letter code (=val) from canonical form (=key); use only for setting locale (non-wx)
+locname = {'en_US': u'English (U.S.)'} # descriptive name, if available; 5-letter code if not
+reverseMap = {u'English (U.S.)': 'en_US'}
mappings = os.path.join(os.path.dirname(__file__), 'mappings.txt')
for line in codecs.open(mappings, 'rU', 'utf8').readlines():
try:
| {"golden_diff": "diff --git a/psychopy/app/localization/__init__.py b/psychopy/app/localization/__init__.py\n--- a/psychopy/app/localization/__init__.py\n+++ b/psychopy/app/localization/__init__.py\n@@ -31,13 +31,10 @@\n \n # Get a dict of locale aliases from wx.Locale() -- same cross-platform (Win 7, Mac 10.9)\n locale = wx.Locale()\n-aliases = {}\n-wxIdFromCode = {} # int: 0, 2-229\n-codeFromWxId = {} # used in directory names e.g. ja_JP; never JPN ala Windows\n-winmap = {} # get windows 3-letter code (=val) from canonical form (=key); use only for setting locale (non-wx)\n-locname = {} # descriptive name, if available; 5-letter code if not\n-reverseMap = {}\n-\n+aliases = {u'English (U.S.)': 'en_US'}\n+# set defaults because locale.GetLanguageInfo(0) can return None on some systems:\n+wxIdFromCode = {'en_US': wx.LANGUAGE_DEFAULT} # int: 0 default, 2-229\n+codeFromWxId = {wx.LANGUAGE_DEFAULT: 'en_US'} # used in directory names e.g. ja_JP; never JPN ala Windows\n for i in range(230):\n info = locale.GetLanguageInfo(i)\n if info:\n@@ -45,6 +42,10 @@\n wxIdFromCode[info.CanonicalName] = i\n codeFromWxId[i] = info.CanonicalName\n \n+# read all known mappings cross-platform from a file:\n+winmap = {'en_US': 'ENU'} # get windows 3-letter code (=val) from canonical form (=key); use only for setting locale (non-wx)\n+locname = {'en_US': u'English (U.S.)'} # descriptive name, if available; 5-letter code if not\n+reverseMap = {u'English (U.S.)': 'en_US'}\n mappings = os.path.join(os.path.dirname(__file__), 'mappings.txt')\n for line in codecs.open(mappings, 'rU', 'utf8').readlines():\n try:\n", "issue": "gui import from psychopy not woriking\nHi all,\n\nim trying to run a psychopy script from terminal but I get this error:\n\nTraceback (most recent call last):\n File \"nf_test_lastrun.py\", line 11, in <module>\n from psychopy import visual, core, data, event, logging, sound, gui\n File \"/Library/Python/2.7/site-packages/PsychoPy-1.81.00-py2.7.egg/psychopy/gui.py\", line 11, in <module>\n from psychopy.app import localization\n File \"/Library/Python/2.7/site-packages/PsychoPy-1.81.00-py2.7.egg/psychopy/app/localization/**init**.py\", line 89, in <module>\n languageID, lang = getID()\n File \"/Library/Python/2.7/site-packages/PsychoPy-1.81.00-py2.7.egg/psychopy/app/localization/**init**.py\", line 78, in getID\n val = codeFromWxId[wx.LANGUAGE_DEFAULT]\nKeyError: 0\n\nwhen I open python and try to import from python, all work but gui.\n\nany suggestions\nthanks\nclemens\n\n", "before_files": [{"content": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\n\"\"\"Language localization for PsychoPy.\n\nSets the locale value as a wx languageID (int) and initializes gettext translation _translate():\n from psychopy.app import localization\n\"\"\"\n\n# Part of the PsychoPy library\n# Copyright (C) 2014 Jonathan Peirce\n# Distributed under the terms of the GNU General Public License (GPL).\n\n# Author: Jeremy Gray, July 2014\n\n\nimport gettext\nimport os, sys, glob, codecs\nfrom psychopy import logging, prefs\n\nimport wx\n\n# need a wx App for wx.Locale:\ntry:\n wx.Dialog(None, -1)\nexcept wx._core.PyNoAppError:\n if wx.version() < '2.9':\n tmpApp = wx.PySimpleApp()\n else:\n tmpApp = wx.App(False)\n\n# Get a dict of locale aliases from wx.Locale() -- same cross-platform (Win 7, Mac 10.9)\nlocale = wx.Locale()\naliases = {}\nwxIdFromCode = {} # int: 0, 2-229\ncodeFromWxId = {} # used in directory names e.g. 
ja_JP; never JPN ala Windows\nwinmap = {} # get windows 3-letter code (=val) from canonical form (=key); use only for setting locale (non-wx)\nlocname = {} # descriptive name, if available; 5-letter code if not\nreverseMap = {}\n\nfor i in range(230):\n info = locale.GetLanguageInfo(i)\n if info:\n aliases[info.Description] = info.CanonicalName # mix of forms: ja or ja_JP\n wxIdFromCode[info.CanonicalName] = i\n codeFromWxId[i] = info.CanonicalName\n\nmappings = os.path.join(os.path.dirname(__file__), 'mappings.txt')\nfor line in codecs.open(mappings, 'rU', 'utf8').readlines():\n try:\n can, win, name = line.strip().split(' ', 2) # canonical, windows, name-with-spaces\n except ValueError:\n can, win = line.strip().split(' ', 1)\n name = can\n winmap[can] = win\n locname[can] = name\n reverseMap[name] = can\n\n# what are the available translations? available languages on the OS?\nexpr = os.path.join(os.path.dirname(__file__), '..', 'locale', '*')\navailable = sorted(map(os.path.basename, glob.glob(expr)))\nsysAvail = [str(l) for l in codeFromWxId.values() # installed language packs\n if l and locale.IsAvailable(wxIdFromCode[l])]\n\ndef getID(lang=None):\n \"\"\"Get wx ID of language to use for translations: `lang`, pref, or system default.\n\n `lang` is a 5 char `language_REGION`, eg ja_JP\n \"\"\"\n if lang:\n val = lang\n else:\n try:\n val = prefs.app['locale']\n except KeyError:\n val = locale.GetLocale() # wx.Locale, no encoding\n if not val:\n val = codeFromWxId[wx.LANGUAGE_DEFAULT]\n try:\n # out-dated: [can't set wx.Locale here because no app yet] now there is an app\n # here just determine the value to be used when it can be set\n language = wxIdFromCode[val]\n except KeyError:\n logging.error('locale %s not known to wx.Locale, using default' % val)\n language = wx.LANGUAGE_DEFAULT\n\n return language, val\n\nlanguageID, lang = getID()\n#use lang like this:\n#import locale -- the non-wx version of locale\n#\n#if sys.platform.startswith('win'):\n# v = winmap[val]\n#else: v=val\n#locale.setlocale(locale.LC_ALL, (v, 'UTF-8'))\n\n# set locale before splash screen:\nif locale.IsAvailable(languageID):\n wxlocale = wx.Locale(languageID)\nelse:\n wxlocale = wx.Locale(wx.LANGUAGE_DEFAULT)\n\n# ideally rewrite the following using wxlocale only:\npath = os.path.join(os.path.dirname(__file__), '..', 'locale', lang, 'LC_MESSAGE') + os.sep\nmofile = os.path.join(path, 'messages.mo')\ntry:\n logging.debug(\"Opening message catalog %s for locale %s\" % (mofile, lang))\n trans = gettext.GNUTranslations(open(mofile, \"rb\"))\nexcept IOError:\n logging.debug(\"Locale for '%s' not found. Using default.\" % lang)\n trans = gettext.NullTranslations()\ntrans.install(unicode=True)\n\n# to avoid a crash, PsychoPy app uses a nonstandard name _translate instead of _\n# seems like a var in a dependency is named _, clobbering _ as global translation:\n__builtins__['_translate'] = _\ndel(__builtins__['_']) # idea: force psychopy code to use _translate\n\n\n#__builtins__['_'] = wx.GetTranslation\n# this seems to have no effect, needs more investigation:\n#path = os.path.join(os.path.dirname(__file__), '..', 'locale', lang, 'LC_MESSAGE') + os.sep\n#wxlocale.AddCatalogLookupPathPrefix(path)\n", "path": "psychopy/app/localization/__init__.py"}]} | 2,248 | 507 |
gh_patches_debug_4447 | rasdani/github-patches | git_diff | Mailu__Mailu-1910 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Rate limiting changes for 1.8
Copied from #1582. For 1.8 we will, for now, increase the rate limiting value and disable rate limiting for the subnet. 
- Rate limiting
- Document rate limiting
  - Currently the subnet is included in the rate limiting. This means that a user who repeatedly fails to log in to the webmail blocks the webmail for ALL users.
- For 1.8 and master
- in mailu.env set the rate limit to a high value.
- in mailu.env disable the rate limiter for the subnet.
  - And of course document this change in the documentation
  - Set status blocked on lub's pull request and request to further discuss this for Mailu 1.9.
    - Make authentication fast #1745 is a draft PR from nextgens which contains a solution for this problem.
- We need a new issue and PR for making these changes.
</issue>
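As a rough illustration of the configuration change requested above, here is a small standalone sketch; `RATE_LIMIT_DEFAULTS` and `apply_rate_limit_defaults` are names made up for this example, and the two values simply reflect the higher limit and disabled-subnet defaults the issue asks for:

```python
# Standalone sketch, not Mailu code: raise the default auth rate limit and
# stop rate limiting per subnet, while still letting mailu.env override both.
RATE_LIMIT_DEFAULTS = {
    "AUTH_RATELIMIT": "1000/minute;10000/hour",  # much higher default limit
    "AUTH_RATELIMIT_SUBNET": False,              # one user's failures no longer block the whole subnet
}


def apply_rate_limit_defaults(env_overrides: dict) -> dict:
    # values explicitly set by the operator win over the defaults
    merged = dict(RATE_LIMIT_DEFAULTS)
    merged.update(env_overrides)
    return merged


if __name__ == "__main__":
    print(apply_rate_limit_defaults({}))
    print(apply_rate_limit_defaults({"AUTH_RATELIMIT": "10/minute"}))
```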
<code>
[start of core/admin/mailu/configuration.py]
1 import os
2
3 from datetime import timedelta
4 from socrate import system
5
6 DEFAULT_CONFIG = {
7 # Specific to the admin UI
8 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',
9 'BABEL_DEFAULT_LOCALE': 'en',
10 'BABEL_DEFAULT_TIMEZONE': 'UTC',
11 'BOOTSTRAP_SERVE_LOCAL': True,
12 'RATELIMIT_STORAGE_URL': '',
13 'QUOTA_STORAGE_URL': '',
14 'DEBUG': False,
15 'DOMAIN_REGISTRATION': False,
16 'TEMPLATES_AUTO_RELOAD': True,
17 'MEMORY_SESSIONS': False,
18 # Database settings
19 'DB_FLAVOR': None,
20 'DB_USER': 'mailu',
21 'DB_PW': None,
22 'DB_HOST': 'database',
23 'DB_NAME': 'mailu',
24 'SQLITE_DATABASE_FILE':'data/main.db',
25 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',
26 'SQLALCHEMY_TRACK_MODIFICATIONS': False,
27 # Statistics management
28 'INSTANCE_ID_PATH': '/data/instance',
29 'STATS_ENDPOINT': '18.{}.stats.mailu.io',
30 # Common configuration variables
31 'SECRET_KEY': 'changeMe',
32 'DOMAIN': 'mailu.io',
33 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',
34 'POSTMASTER': 'postmaster',
35 'TLS_FLAVOR': 'cert',
36 'INBOUND_TLS_ENFORCE': False,
37 'AUTH_RATELIMIT': '10/minute;1000/hour',
38 'AUTH_RATELIMIT_SUBNET': True,
39 'DISABLE_STATISTICS': False,
40 # Mail settings
41 'DMARC_RUA': None,
42 'DMARC_RUF': None,
43 'WELCOME': False,
44 'WELCOME_SUBJECT': 'Dummy welcome topic',
45 'WELCOME_BODY': 'Dummy welcome body',
46 'DKIM_SELECTOR': 'dkim',
47 'DKIM_PATH': '/dkim/{domain}.{selector}.key',
48 'DEFAULT_QUOTA': 1000000000,
49 # Web settings
50 'SITENAME': 'Mailu',
51 'WEBSITE': 'https://mailu.io',
52 'WEB_ADMIN': '/admin',
53 'WEB_WEBMAIL': '/webmail',
54 'WEBMAIL': 'none',
55 'RECAPTCHA_PUBLIC_KEY': '',
56 'RECAPTCHA_PRIVATE_KEY': '',
57 # Advanced settings
58 'LOG_LEVEL': 'WARNING',
59 'SESSION_KEY_BITS': 128,
60 'SESSION_LIFETIME': 24,
61 'SESSION_COOKIE_SECURE': True,
62 'CREDENTIAL_ROUNDS': 12,
63 # Host settings
64 'HOST_IMAP': 'imap',
65 'HOST_LMTP': 'imap:2525',
66 'HOST_POP3': 'imap',
67 'HOST_SMTP': 'smtp',
68 'HOST_AUTHSMTP': 'smtp',
69 'HOST_ADMIN': 'admin',
70 'HOST_WEBMAIL': 'webmail',
71 'HOST_WEBDAV': 'webdav:5232',
72 'HOST_REDIS': 'redis',
73 'HOST_FRONT': 'front',
74 'SUBNET': '192.168.203.0/24',
75 'SUBNET6': None,
76 'POD_ADDRESS_RANGE': None
77 }
78
79 class ConfigManager(dict):
80 """ Naive configuration manager that uses environment only
81 """
82
83 DB_TEMPLATES = {
84 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',
85 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',
86 'mysql': 'mysql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'
87 }
88
89 def __init__(self):
90 self.config = dict()
91
92 def get_host_address(self, name):
93 # if MYSERVICE_ADDRESS is defined, use this
94 if '{}_ADDRESS'.format(name) in os.environ:
95 return os.environ.get('{}_ADDRESS'.format(name))
96 # otherwise use the host name and resolve it
97 return system.resolve_address(self.config['HOST_{}'.format(name)])
98
99 def resolve_hosts(self):
100 self.config["IMAP_ADDRESS"] = self.get_host_address("IMAP")
101 self.config["POP3_ADDRESS"] = self.get_host_address("POP3")
102 self.config["AUTHSMTP_ADDRESS"] = self.get_host_address("AUTHSMTP")
103 self.config["SMTP_ADDRESS"] = self.get_host_address("SMTP")
104 self.config["REDIS_ADDRESS"] = self.get_host_address("REDIS")
105 if self.config["WEBMAIL"] != "none":
106 self.config["WEBMAIL_ADDRESS"] = self.get_host_address("WEBMAIL")
107
108 def __get_env(self, key, value):
109 key_file = key + "_FILE"
110 if key_file in os.environ:
111 with open(os.environ.get(key_file)) as file:
112 value_from_file = file.read()
113 return value_from_file.strip()
114 else:
115 return os.environ.get(key, value)
116
117 def __coerce_value(self, value):
118 if isinstance(value, str) and value.lower() in ('true','yes'):
119 return True
120 elif isinstance(value, str) and value.lower() in ('false', 'no'):
121 return False
122 return value
123
124 def init_app(self, app):
125 self.config.update(app.config)
126 # get environment variables
127 self.config.update({
128 key: self.__coerce_value(self.__get_env(key, value))
129 for key, value in DEFAULT_CONFIG.items()
130 })
131 self.resolve_hosts()
132
133 # automatically set the sqlalchemy string
134 if self.config['DB_FLAVOR']:
135 template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]
136 self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)
137
138 self.config['RATELIMIT_STORAGE_URL'] = 'redis://{0}/2'.format(self.config['REDIS_ADDRESS'])
139 self.config['QUOTA_STORAGE_URL'] = 'redis://{0}/1'.format(self.config['REDIS_ADDRESS'])
140 self.config['SESSION_STORAGE_URL'] = 'redis://{0}/3'.format(self.config['REDIS_ADDRESS'])
141 self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'
142 self.config['SESSION_COOKIE_HTTPONLY'] = True
143 self.config['PERMANENT_SESSION_LIFETIME'] = timedelta(hours=int(self.config['SESSION_LIFETIME']))
144 # update the app config itself
145 app.config = self
146
147 def setdefault(self, key, value):
148 if key not in self.config:
149 self.config[key] = value
150 return self.config[key]
151
152 def get(self, *args):
153 return self.config.get(*args)
154
155 def keys(self):
156 return self.config.keys()
157
158 def __getitem__(self, key):
159 return self.config.get(key)
160
161 def __setitem__(self, key, value):
162 self.config[key] = value
163
164 def __contains__(self, key):
165 return key in self.config
166
[end of core/admin/mailu/configuration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py
--- a/core/admin/mailu/configuration.py
+++ b/core/admin/mailu/configuration.py
@@ -34,8 +34,8 @@
'POSTMASTER': 'postmaster',
'TLS_FLAVOR': 'cert',
'INBOUND_TLS_ENFORCE': False,
- 'AUTH_RATELIMIT': '10/minute;1000/hour',
- 'AUTH_RATELIMIT_SUBNET': True,
+ 'AUTH_RATELIMIT': '1000/minute;10000/hour',
+ 'AUTH_RATELIMIT_SUBNET': False,
'DISABLE_STATISTICS': False,
# Mail settings
'DMARC_RUA': None,
| {"golden_diff": "diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py\n--- a/core/admin/mailu/configuration.py\n+++ b/core/admin/mailu/configuration.py\n@@ -34,8 +34,8 @@\n 'POSTMASTER': 'postmaster',\n 'TLS_FLAVOR': 'cert',\n 'INBOUND_TLS_ENFORCE': False,\n- 'AUTH_RATELIMIT': '10/minute;1000/hour',\n- 'AUTH_RATELIMIT_SUBNET': True,\n+ 'AUTH_RATELIMIT': '1000/minute;10000/hour',\n+ 'AUTH_RATELIMIT_SUBNET': False,\n 'DISABLE_STATISTICS': False,\n # Mail settings\n 'DMARC_RUA': None,\n", "issue": "Rate limiting changes for 1.8\nCopied from #1582. For 1.8 we will for now increase rate limiting value and disable rate limiting for the subnet. \r\n\r\n- Rate limiting\r\n - Document rate limiting\r\n - Currently the subnet is included in the rate limiting. This means that a user who repeatly fails to login the webmail, blocks the webmail for ALL users.\r\n\r\n- For 1.8 and master\r\n - in mailu.env set the rate limit to a high value.\r\n - in mailu.env disable the rate limiter for the subnet.\r\n - And document this of course and change this in the documentation\r\n - Set status blocked on lubs pull request and request to further discuss this for mailu 1.9.\r\n - Make authentication fast #1745 is a draft pr from nextgens which contains a solution for this problem.\r\n - We need a new issue and PR for making these changes.\n", "before_files": [{"content": "import os\n\nfrom datetime import timedelta\nfrom socrate import system\n\nDEFAULT_CONFIG = {\n # Specific to the admin UI\n 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',\n 'BABEL_DEFAULT_LOCALE': 'en',\n 'BABEL_DEFAULT_TIMEZONE': 'UTC',\n 'BOOTSTRAP_SERVE_LOCAL': True,\n 'RATELIMIT_STORAGE_URL': '',\n 'QUOTA_STORAGE_URL': '',\n 'DEBUG': False,\n 'DOMAIN_REGISTRATION': False,\n 'TEMPLATES_AUTO_RELOAD': True,\n 'MEMORY_SESSIONS': False,\n # Database settings\n 'DB_FLAVOR': None,\n 'DB_USER': 'mailu',\n 'DB_PW': None,\n 'DB_HOST': 'database',\n 'DB_NAME': 'mailu',\n 'SQLITE_DATABASE_FILE':'data/main.db',\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n # Statistics management\n 'INSTANCE_ID_PATH': '/data/instance',\n 'STATS_ENDPOINT': '18.{}.stats.mailu.io',\n # Common configuration variables\n 'SECRET_KEY': 'changeMe',\n 'DOMAIN': 'mailu.io',\n 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',\n 'POSTMASTER': 'postmaster',\n 'TLS_FLAVOR': 'cert',\n 'INBOUND_TLS_ENFORCE': False,\n 'AUTH_RATELIMIT': '10/minute;1000/hour',\n 'AUTH_RATELIMIT_SUBNET': True,\n 'DISABLE_STATISTICS': False,\n # Mail settings\n 'DMARC_RUA': None,\n 'DMARC_RUF': None,\n 'WELCOME': False,\n 'WELCOME_SUBJECT': 'Dummy welcome topic',\n 'WELCOME_BODY': 'Dummy welcome body',\n 'DKIM_SELECTOR': 'dkim',\n 'DKIM_PATH': '/dkim/{domain}.{selector}.key',\n 'DEFAULT_QUOTA': 1000000000,\n # Web settings\n 'SITENAME': 'Mailu',\n 'WEBSITE': 'https://mailu.io',\n 'WEB_ADMIN': '/admin',\n 'WEB_WEBMAIL': '/webmail',\n 'WEBMAIL': 'none',\n 'RECAPTCHA_PUBLIC_KEY': '',\n 'RECAPTCHA_PRIVATE_KEY': '',\n # Advanced settings\n 'LOG_LEVEL': 'WARNING',\n 'SESSION_KEY_BITS': 128,\n 'SESSION_LIFETIME': 24,\n 'SESSION_COOKIE_SECURE': True,\n 'CREDENTIAL_ROUNDS': 12,\n # Host settings\n 'HOST_IMAP': 'imap',\n 'HOST_LMTP': 'imap:2525',\n 'HOST_POP3': 'imap',\n 'HOST_SMTP': 'smtp',\n 'HOST_AUTHSMTP': 'smtp',\n 'HOST_ADMIN': 'admin',\n 'HOST_WEBMAIL': 'webmail',\n 'HOST_WEBDAV': 'webdav:5232',\n 'HOST_REDIS': 'redis',\n 'HOST_FRONT': 'front',\n 'SUBNET': '192.168.203.0/24',\n 'SUBNET6': None,\n 
'POD_ADDRESS_RANGE': None\n}\n\nclass ConfigManager(dict):\n \"\"\" Naive configuration manager that uses environment only\n \"\"\"\n\n DB_TEMPLATES = {\n 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',\n 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n 'mysql': 'mysql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'\n }\n\n def __init__(self):\n self.config = dict()\n\n def get_host_address(self, name):\n # if MYSERVICE_ADDRESS is defined, use this\n if '{}_ADDRESS'.format(name) in os.environ:\n return os.environ.get('{}_ADDRESS'.format(name))\n # otherwise use the host name and resolve it\n return system.resolve_address(self.config['HOST_{}'.format(name)])\n\n def resolve_hosts(self):\n self.config[\"IMAP_ADDRESS\"] = self.get_host_address(\"IMAP\")\n self.config[\"POP3_ADDRESS\"] = self.get_host_address(\"POP3\")\n self.config[\"AUTHSMTP_ADDRESS\"] = self.get_host_address(\"AUTHSMTP\")\n self.config[\"SMTP_ADDRESS\"] = self.get_host_address(\"SMTP\")\n self.config[\"REDIS_ADDRESS\"] = self.get_host_address(\"REDIS\")\n if self.config[\"WEBMAIL\"] != \"none\":\n self.config[\"WEBMAIL_ADDRESS\"] = self.get_host_address(\"WEBMAIL\")\n\n def __get_env(self, key, value):\n key_file = key + \"_FILE\"\n if key_file in os.environ:\n with open(os.environ.get(key_file)) as file:\n value_from_file = file.read()\n return value_from_file.strip()\n else:\n return os.environ.get(key, value)\n\n def __coerce_value(self, value):\n if isinstance(value, str) and value.lower() in ('true','yes'):\n return True\n elif isinstance(value, str) and value.lower() in ('false', 'no'):\n return False\n return value\n\n def init_app(self, app):\n self.config.update(app.config)\n # get environment variables\n self.config.update({\n key: self.__coerce_value(self.__get_env(key, value))\n for key, value in DEFAULT_CONFIG.items()\n })\n self.resolve_hosts()\n\n # automatically set the sqlalchemy string\n if self.config['DB_FLAVOR']:\n template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]\n self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)\n\n self.config['RATELIMIT_STORAGE_URL'] = 'redis://{0}/2'.format(self.config['REDIS_ADDRESS'])\n self.config['QUOTA_STORAGE_URL'] = 'redis://{0}/1'.format(self.config['REDIS_ADDRESS'])\n self.config['SESSION_STORAGE_URL'] = 'redis://{0}/3'.format(self.config['REDIS_ADDRESS'])\n self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'\n self.config['SESSION_COOKIE_HTTPONLY'] = True\n self.config['PERMANENT_SESSION_LIFETIME'] = timedelta(hours=int(self.config['SESSION_LIFETIME']))\n # update the app config itself\n app.config = self\n\n def setdefault(self, key, value):\n if key not in self.config:\n self.config[key] = value\n return self.config[key]\n\n def get(self, *args):\n return self.config.get(*args)\n\n def keys(self):\n return self.config.keys()\n\n def __getitem__(self, key):\n return self.config.get(key)\n\n def __setitem__(self, key, value):\n self.config[key] = value\n\n def __contains__(self, key):\n return key in self.config\n", "path": "core/admin/mailu/configuration.py"}]} | 2,626 | 163 |
gh_patches_debug_33740 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-1437 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG]: FreqCacheEmbeddingBag._weight.ProcessGroup is initialized before _weight initialized
### 🐛 Describe the bug
When I initialize a DLRM with ParallelFreqAwareEmbeddingBag, the following error is reported:

I believe that is because [ParallelFreqAwareEmbeddingBag](https://github.com/hpcaitech/ColossalAI/blob/039b7ed3bc33173e36c5c4decd41f8d7b1ec0f45/colossalai/nn/_ops/cache_embedding/parallel_freq_aware_embedding.py#L60) initializes its `_weight.ProcessGroup` before `_weight` itself is initialized.
After I swap the above line with the next one, the traceback shows another error:

It looks like an API update issue.
### Environment
_No response_
</issue>
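Stripped of all colossalai specifics, the ordering problem described above can be reproduced with plain Python; `BuggyOrder` and `FixedOrder` are illustrative names only, and `SimpleNamespace` merely stands in for the real parameter object:

```python
from types import SimpleNamespace

# Self-contained illustration of the bug: an attribute is assigned on
# self._weight before self._weight itself has been created.
class BuggyOrder:
    def __init__(self):
        self._weight.process_group = "tp-group"  # AttributeError: _weight does not exist yet
        self._weight = SimpleNamespace()


class FixedOrder:
    def __init__(self):
        process_group = "tp-group"                                    # build the metadata first
        self._weight = SimpleNamespace(process_group=process_group)   # then create the weight with it


if __name__ == "__main__":
    try:
        BuggyOrder()
    except AttributeError as exc:
        print("buggy order:", exc)
    print("fixed order:", FixedOrder()._weight.process_group)
```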
<code>
[start of colossalai/nn/_ops/cache_embedding/parallel_freq_aware_embedding.py]
1 import torch
2 import torch.nn.functional as F
3 from typing import List, Optional, Iterator, Tuple
4
5 from .base_embedding import BaseEmbeddingBag
6 from .cache_mgr import CachedParamMgr
7 from torch.nn.parameter import Parameter
8 from .._utils import dual_all_to_all
9
10 from colossalai.tensor import ColoParameter, ShardSpec, ComputeSpec, ComputePattern, ProcessGroup
11
12
13 def get_partition(embedding_dim, rank, world_size) -> Tuple[int, int, bool]:
14 if world_size == 1:
15 return 0, embedding_dim, True
16
17 assert embedding_dim >= world_size, \
18 f"Embedding dimension {embedding_dim} must be larger than the world size " \
19 f"{world_size} of the process group"
20 chunk_size = embedding_dim // world_size
21 threshold = embedding_dim % world_size
22 # if embedding dim is divisible by world size
23 if threshold == 0:
24 return rank * chunk_size, (rank + 1) * chunk_size, True
25
26 # align with the split strategy of torch.tensor_split
27 size_list = [chunk_size + 1 if i < threshold else chunk_size for i in range(world_size)]
28 offset = sum(size_list[:rank])
29 return offset, offset + size_list[rank], False
30
31
32 class ParallelFreqAwareEmbeddingBag(BaseEmbeddingBag):
33
34 def __init__(self,
35 num_embeddings,
36 embedding_dim,
37 padding_idx=None,
38 max_norm=None,
39 norm_type=2.,
40 scale_grad_by_freq=False,
41 sparse=False,
42 _weight=None,
43 mode='mean',
44 include_last_offset=False,
45 dtype=None,
46 debug=True):
47 super(ParallelFreqAwareEmbeddingBag,
48 self).__init__(num_embeddings, embedding_dim, padding_idx, max_norm, norm_type, scale_grad_by_freq,
49 sparse, mode, include_last_offset)
50
51 self.rank = torch.distributed.get_rank()
52 self.world_size = torch.distributed.get_world_size()
53 self.debug = debug
54
55 self.partition_start_index, self.partition_end_index, divisible = get_partition(
56 embedding_dim, self.rank, self.world_size)
57 self.embedding_dim_per_partition = self.partition_end_index - self.partition_start_index
58
59 if _weight is None:
60 self._weight.process_group = ProcessGroup(tp_degree=self.world_size)
61 self._weight = ColoParameter.from_torch_tensor(torch.empty(self.num_embeddings,
62 self.embedding_dim_per_partition,
63 device='cpu',
64 dtype=dtype),
65 requires_grad=True,
66 spec=ShardSpec(dims=[-1], num_partitions=[self.world_size]))
67 self.init_parameters()
68 else:
69 assert isinstance(_weight, ColoParameter), "initialized weight must in type of ColoParameter"
70 self._weight = _weight
71
72 @property
73 def weight(self):
74 return self.cache_weight_mgr.cpu_weight
75
76 def named_parameters(self, prefix: str = '', recurse: bool = True) -> Iterator[Tuple[str, Parameter]]:
77 yield 'weight', self.cache_weight_mgr.cuda_cached_weight
78
79 def parameters(self, recurse: bool = True) -> Iterator[Parameter]:
80 yield self.cache_weight_mgr.cuda_cached_weight
81
82 @torch.no_grad()
83 def init_parameters(self):
84 self._weight.data.uniform_(-1 / self.num_embeddings, 1 / self.num_embeddings)
85 if self.padding_idx is not None:
86 self._weight[self.padding_idx].fill_(0)
87
88 def preprocess(self,
89 cuda_row_num: int,
90 ids_freq_mapping: Optional[List[int]] = None,
91 warmup_ratio: float = 0.7,
92 buffer_size: int = 50_000):
93 self.cache_weight_mgr = CachedParamMgr(self._weight, cuda_row_num, buffer_size=buffer_size)
94 self.cache_weight_mgr.reorder(ids_freq_mapping, warmup_ratio)
95
96 def forward(self, indices, offsets=None, per_sample_weights=None, shape_hook=None, scatter_dim=0, gather_dim=-1):
97 with torch.no_grad():
98 reorder_ids = self.cache_weight_mgr.prepare_ids(indices)
99
100 output_shard = F.embedding_bag(reorder_ids, self.cache_weight_mgr.cuda_cached_weight, offsets, self.max_norm,
101 self.norm_type, self.scale_grad_by_freq, self.mode, self.sparse,
102 per_sample_weights, self.include_last_offset, self.padding_idx)
103
104 if shape_hook is not None:
105 output_shard = shape_hook(output_shard)
106
107 output_full = dual_all_to_all(output_shard,
108 self._weight.get_process_group(),
109 scatter_dim=scatter_dim,
110 gather_dim=gather_dim)
111 return output_full
112
113 @classmethod
114 def from_pretrained(cls,
115 embedding: torch.Tensor,
116 freeze: bool = True,
117 padding_idx: Optional[int] = None,
118 max_norm: Optional[float] = None,
119 norm_type: float = 2.,
120 scale_grad_by_freq: bool = False,
121 sparse: bool = False,
122 mode: str = 'mean',
123 include_last_offset: bool = False,
124 debug: bool = True,
125 cuda_row_num: int = 100_000,
126 ids_freq_mapping: Optional[List[int]] = None,
127 warmup_ratio: float = 0.7) -> 'ParallelFreqAwareEmbeddingBag':
128 rows, cols = embedding.shape
129 embedding_bag = cls(rows, cols, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse, embedding, mode,
130 include_last_offset, debug)
131 embedding_bag.preprocess(cuda_row_num, ids_freq_mapping, warmup_ratio)
132 embedding_bag.cache_weight_mgr.cuda_cached_weight.requires_grad_ = not freeze
133 return embedding_bag
134
[end of colossalai/nn/_ops/cache_embedding/parallel_freq_aware_embedding.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/colossalai/nn/_ops/cache_embedding/parallel_freq_aware_embedding.py b/colossalai/nn/_ops/cache_embedding/parallel_freq_aware_embedding.py
--- a/colossalai/nn/_ops/cache_embedding/parallel_freq_aware_embedding.py
+++ b/colossalai/nn/_ops/cache_embedding/parallel_freq_aware_embedding.py
@@ -7,7 +7,7 @@
from torch.nn.parameter import Parameter
from .._utils import dual_all_to_all
-from colossalai.tensor import ColoParameter, ShardSpec, ComputeSpec, ComputePattern, ProcessGroup
+from colossalai.tensor import ColoParameter, ShardSpec, ComputeSpec, ComputePattern, ProcessGroup, ColoTensorSpec
def get_partition(embedding_dim, rank, world_size) -> Tuple[int, int, bool]:
@@ -57,13 +57,15 @@
self.embedding_dim_per_partition = self.partition_end_index - self.partition_start_index
if _weight is None:
- self._weight.process_group = ProcessGroup(tp_degree=self.world_size)
+ colo_tensor_spec = ColoTensorSpec(pg=ProcessGroup(tp_degree=self.world_size),
+ dist_attr=ShardSpec(dims=[-1], num_partitions=[self.world_size]),
+ compute_attr=ComputePattern.TP1D)
self._weight = ColoParameter.from_torch_tensor(torch.empty(self.num_embeddings,
self.embedding_dim_per_partition,
device='cpu',
dtype=dtype),
requires_grad=True,
- spec=ShardSpec(dims=[-1], num_partitions=[self.world_size]))
+ spec=colo_tensor_spec)
self.init_parameters()
else:
assert isinstance(_weight, ColoParameter), "initialized weight must in type of ColoParameter"
| {"golden_diff": "diff --git a/colossalai/nn/_ops/cache_embedding/parallel_freq_aware_embedding.py b/colossalai/nn/_ops/cache_embedding/parallel_freq_aware_embedding.py\n--- a/colossalai/nn/_ops/cache_embedding/parallel_freq_aware_embedding.py\n+++ b/colossalai/nn/_ops/cache_embedding/parallel_freq_aware_embedding.py\n@@ -7,7 +7,7 @@\n from torch.nn.parameter import Parameter\n from .._utils import dual_all_to_all\n \n-from colossalai.tensor import ColoParameter, ShardSpec, ComputeSpec, ComputePattern, ProcessGroup\n+from colossalai.tensor import ColoParameter, ShardSpec, ComputeSpec, ComputePattern, ProcessGroup, ColoTensorSpec\n \n \n def get_partition(embedding_dim, rank, world_size) -> Tuple[int, int, bool]:\n@@ -57,13 +57,15 @@\n self.embedding_dim_per_partition = self.partition_end_index - self.partition_start_index\n \n if _weight is None:\n- self._weight.process_group = ProcessGroup(tp_degree=self.world_size)\n+ colo_tensor_spec = ColoTensorSpec(pg=ProcessGroup(tp_degree=self.world_size),\n+ dist_attr=ShardSpec(dims=[-1], num_partitions=[self.world_size]),\n+ compute_attr=ComputePattern.TP1D)\n self._weight = ColoParameter.from_torch_tensor(torch.empty(self.num_embeddings,\n self.embedding_dim_per_partition,\n device='cpu',\n dtype=dtype),\n requires_grad=True,\n- spec=ShardSpec(dims=[-1], num_partitions=[self.world_size]))\n+ spec=colo_tensor_spec)\n self.init_parameters()\n else:\n assert isinstance(_weight, ColoParameter), \"initialized weight must in type of ColoParameter\"\n", "issue": "[BUG]: FreqCacheEmbeddingBag._weight.ProcessGroup is initialized before _weight initialized\n### \ud83d\udc1b Describe the bug\n\nWhen I init a DLRM with ParallelFreqAwareEmbeddingBag, the bug is reported as the following:\r\n\r\n\r\n\r\nI believe that is because the [ParallelFreqAwareEmbeddingBag](https://github.com/hpcaitech/ColossalAI/blob/039b7ed3bc33173e36c5c4decd41f8d7b1ec0f45/colossalai/nn/_ops/cache_embedding/parallel_freq_aware_embedding.py#L60) init its `_weight.ProcessGroup` before the `_weight` is initialized.\r\n\r\nAfter I swap above line with its next line, the traceback shows another error:\r\n\r\n\r\nIt looks like some api update issue.\n\n### Environment\n\n_No response_\n", "before_files": [{"content": "import torch\nimport torch.nn.functional as F\nfrom typing import List, Optional, Iterator, Tuple\n\nfrom .base_embedding import BaseEmbeddingBag\nfrom .cache_mgr import CachedParamMgr\nfrom torch.nn.parameter import Parameter\nfrom .._utils import dual_all_to_all\n\nfrom colossalai.tensor import ColoParameter, ShardSpec, ComputeSpec, ComputePattern, ProcessGroup\n\n\ndef get_partition(embedding_dim, rank, world_size) -> Tuple[int, int, bool]:\n if world_size == 1:\n return 0, embedding_dim, True\n\n assert embedding_dim >= world_size, \\\n f\"Embedding dimension {embedding_dim} must be larger than the world size \" \\\n f\"{world_size} of the process group\"\n chunk_size = embedding_dim // world_size\n threshold = embedding_dim % world_size\n # if embedding dim is divisible by world size\n if threshold == 0:\n return rank * chunk_size, (rank + 1) * chunk_size, True\n\n # align with the split strategy of torch.tensor_split\n size_list = [chunk_size + 1 if i < threshold else chunk_size for i in range(world_size)]\n offset = sum(size_list[:rank])\n return offset, offset + size_list[rank], False\n\n\nclass ParallelFreqAwareEmbeddingBag(BaseEmbeddingBag):\n\n def __init__(self,\n num_embeddings,\n embedding_dim,\n padding_idx=None,\n max_norm=None,\n norm_type=2.,\n 
scale_grad_by_freq=False,\n sparse=False,\n _weight=None,\n mode='mean',\n include_last_offset=False,\n dtype=None,\n debug=True):\n super(ParallelFreqAwareEmbeddingBag,\n self).__init__(num_embeddings, embedding_dim, padding_idx, max_norm, norm_type, scale_grad_by_freq,\n sparse, mode, include_last_offset)\n\n self.rank = torch.distributed.get_rank()\n self.world_size = torch.distributed.get_world_size()\n self.debug = debug\n\n self.partition_start_index, self.partition_end_index, divisible = get_partition(\n embedding_dim, self.rank, self.world_size)\n self.embedding_dim_per_partition = self.partition_end_index - self.partition_start_index\n\n if _weight is None:\n self._weight.process_group = ProcessGroup(tp_degree=self.world_size)\n self._weight = ColoParameter.from_torch_tensor(torch.empty(self.num_embeddings,\n self.embedding_dim_per_partition,\n device='cpu',\n dtype=dtype),\n requires_grad=True,\n spec=ShardSpec(dims=[-1], num_partitions=[self.world_size]))\n self.init_parameters()\n else:\n assert isinstance(_weight, ColoParameter), \"initialized weight must in type of ColoParameter\"\n self._weight = _weight\n\n @property\n def weight(self):\n return self.cache_weight_mgr.cpu_weight\n\n def named_parameters(self, prefix: str = '', recurse: bool = True) -> Iterator[Tuple[str, Parameter]]:\n yield 'weight', self.cache_weight_mgr.cuda_cached_weight\n\n def parameters(self, recurse: bool = True) -> Iterator[Parameter]:\n yield self.cache_weight_mgr.cuda_cached_weight\n\n @torch.no_grad()\n def init_parameters(self):\n self._weight.data.uniform_(-1 / self.num_embeddings, 1 / self.num_embeddings)\n if self.padding_idx is not None:\n self._weight[self.padding_idx].fill_(0)\n\n def preprocess(self,\n cuda_row_num: int,\n ids_freq_mapping: Optional[List[int]] = None,\n warmup_ratio: float = 0.7,\n buffer_size: int = 50_000):\n self.cache_weight_mgr = CachedParamMgr(self._weight, cuda_row_num, buffer_size=buffer_size)\n self.cache_weight_mgr.reorder(ids_freq_mapping, warmup_ratio)\n\n def forward(self, indices, offsets=None, per_sample_weights=None, shape_hook=None, scatter_dim=0, gather_dim=-1):\n with torch.no_grad():\n reorder_ids = self.cache_weight_mgr.prepare_ids(indices)\n\n output_shard = F.embedding_bag(reorder_ids, self.cache_weight_mgr.cuda_cached_weight, offsets, self.max_norm,\n self.norm_type, self.scale_grad_by_freq, self.mode, self.sparse,\n per_sample_weights, self.include_last_offset, self.padding_idx)\n\n if shape_hook is not None:\n output_shard = shape_hook(output_shard)\n\n output_full = dual_all_to_all(output_shard,\n self._weight.get_process_group(),\n scatter_dim=scatter_dim,\n gather_dim=gather_dim)\n return output_full\n\n @classmethod\n def from_pretrained(cls,\n embedding: torch.Tensor,\n freeze: bool = True,\n padding_idx: Optional[int] = None,\n max_norm: Optional[float] = None,\n norm_type: float = 2.,\n scale_grad_by_freq: bool = False,\n sparse: bool = False,\n mode: str = 'mean',\n include_last_offset: bool = False,\n debug: bool = True,\n cuda_row_num: int = 100_000,\n ids_freq_mapping: Optional[List[int]] = None,\n warmup_ratio: float = 0.7) -> 'ParallelFreqAwareEmbeddingBag':\n rows, cols = embedding.shape\n embedding_bag = cls(rows, cols, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse, embedding, mode,\n include_last_offset, debug)\n embedding_bag.preprocess(cuda_row_num, ids_freq_mapping, warmup_ratio)\n embedding_bag.cache_weight_mgr.cuda_cached_weight.requires_grad_ = not freeze\n return embedding_bag\n", "path": 
"colossalai/nn/_ops/cache_embedding/parallel_freq_aware_embedding.py"}]} | 2,356 | 374 |
gh_patches_debug_11207 | rasdani/github-patches | git_diff | iterative__dvc-5205 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
diff: unexpected error when diffing workspace after dvc remove
# Bug Report
## Description
`dvc diff` will raise an unexpected error, with no other output, if both the `.dvc` file and the original data file are removed from the workspace (i.e. after running `dvc remove`).
```
$ git status ⏎
On branch master
Changes not staged for commit:
deleted: .gitignore
deleted: foo.txt.dvc
no changes added to commit
$ dvc diff -v
2020-12-28 15:43:46,270 DEBUG: Check for update is enabled.
2020-12-28 15:43:46,584 ERROR: unexpected error
------------------------------------------------------------
Traceback (most recent call last):
File "/Users/pmrowla/git/dvc/dvc/main.py", line 90, in main
ret = cmd.run()
File "/Users/pmrowla/git/dvc/dvc/command/diff.py", line 131, in run
diff = self.repo.diff(
File "/Users/pmrowla/git/dvc/dvc/repo/__init__.py", line 53, in wrapper
return f(repo, *args, **kwargs)
File "/Users/pmrowla/git/dvc/dvc/repo/diff.py", line 60, in diff
missing = sorted(_filter_missing(self, deleted_or_missing))
File "/Users/pmrowla/git/dvc/dvc/repo/diff.py", line 151, in _filter_missing
metadata = repo_tree.metadata(path)
File "/Users/pmrowla/git/dvc/dvc/tree/repo.py", line 446, in metadata
raise FileNotFoundError
FileNotFoundError
------------------------------------------------------------
```
### Reproduce
```bash
#!/bin/bash
set -e
set -x
REPO="test_repo"
rm -rf $REPO
mkdir $REPO
pushd $REPO
git init
dvc init
echo "foo" > foo.txt
dvc add foo.txt
git add .
git commit -m "init"
dvc remove foo.txt.dvc
rm foo.txt
dvc diff -v
popd
```
This issue only affects workspace diff. If the changes after the remove are `git commit`ed and the two commits are then `dvc diff`ed, the diff works as expected. The issue can also be reproduced by doing `git rm <file>.dvc; rm <file>` instead of using `dvc remove`.
</issue>
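The traceback above ends in an unhandled `FileNotFoundError` from the metadata lookup for a path that no longer exists in the workspace. A minimal sketch of the kind of guard that avoids this, with `fake_metadata` standing in for the real `RepoTree.metadata` call:

```python
# Standalone sketch, no dvc imports: skip paths whose metadata lookup fails
# because the file was removed from the workspace, instead of crashing.
def fake_metadata(path: str) -> dict:
    if path == "foo.txt":                 # pretend this file was removed
        raise FileNotFoundError(path)
    return {"is_dvc": True, "not_in_cache": False}


def filter_missing(paths):
    for path in paths:
        try:
            meta = fake_metadata(path)
        except FileNotFoundError:
            continue                      # removed from the workspace: report as deleted, not missing
        if meta["is_dvc"] and meta["not_in_cache"]:
            yield path


if __name__ == "__main__":
    print(list(filter_missing(["foo.txt", "bar.txt"])))  # [] here; the point is that foo.txt no longer crashes
```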
<code>
[start of dvc/repo/diff.py]
1 import logging
2 import os
3
4 from dvc.exceptions import PathMissingError
5 from dvc.repo import locked
6 from dvc.tree.local import LocalTree
7 from dvc.tree.repo import RepoTree
8
9 logger = logging.getLogger(__name__)
10
11
12 @locked
13 def diff(self, a_rev="HEAD", b_rev=None, targets=None):
14 """
15 By default, it compares the workspace with the last commit's tree.
16
17 This implementation differs from `git diff` since DVC doesn't have
18 the concept of `index`, but it keeps the same interface, thus,
19 `dvc diff` would be the same as `dvc diff HEAD`.
20 """
21
22 if self.scm.no_commits:
23 return {}
24
25 b_rev = b_rev if b_rev else "workspace"
26 results = {}
27 missing_targets = {}
28 for rev in self.brancher(revs=[a_rev, b_rev]):
29 if rev == "workspace" and rev != b_rev:
30 # brancher always returns workspace, but we only need to compute
31 # workspace paths/checksums if b_rev was None
32 continue
33
34 targets_path_infos = None
35 if targets is not None:
36 # convert targets to path_infos, and capture any missing targets
37 targets_path_infos, missing_targets[rev] = _targets_to_path_infos(
38 self, targets
39 )
40
41 results[rev] = _paths_checksums(self, targets_path_infos)
42
43 if targets is not None:
44 # check for overlapping missing targets between a_rev and b_rev
45 for target in set(missing_targets[a_rev]) & set(
46 missing_targets[b_rev]
47 ):
48 raise PathMissingError(target, self)
49
50 old = results[a_rev]
51 new = results[b_rev]
52
53 # Compare paths between the old and new tree.
54 # set() efficiently converts dict keys to a set
55 added = sorted(set(new) - set(old))
56 deleted_or_missing = set(old) - set(new)
57 if b_rev == "workspace":
58 # missing status is only applicable when diffing local workspace
59 # against a commit
60 missing = sorted(_filter_missing(self, deleted_or_missing))
61 else:
62 missing = []
63 deleted = sorted(deleted_or_missing - set(missing))
64 modified = sorted(set(old) & set(new))
65
66 ret = {
67 "added": [{"path": path, "hash": new[path]} for path in added],
68 "deleted": [{"path": path, "hash": old[path]} for path in deleted],
69 "modified": [
70 {"path": path, "hash": {"old": old[path], "new": new[path]}}
71 for path in modified
72 if old[path] != new[path]
73 ],
74 "not in cache": [
75 {"path": path, "hash": old[path]} for path in missing
76 ],
77 }
78
79 return ret if any(ret.values()) else {}
80
81
82 def _paths_checksums(repo, targets):
83 """
84 A dictionary of checksums addressed by relpaths collected from
85 the current tree outputs.
86
87 To help distinguish between a directory and a file output,
88 the former one will come with a trailing slash in the path:
89
90 directory: "data/"
91 file: "data"
92 """
93
94 return dict(_output_paths(repo, targets))
95
96
97 def _output_paths(repo, targets):
98 repo_tree = RepoTree(repo, stream=True)
99 on_working_tree = isinstance(repo.tree, LocalTree)
100
101 def _exists(output):
102 if on_working_tree:
103 return output.exists
104 return True
105
106 def _to_path(output):
107 return (
108 str(output)
109 if not output.is_dir_checksum
110 else os.path.join(str(output), "")
111 )
112
113 def _to_checksum(output):
114 if on_working_tree:
115 return repo.cache.local.tree.get_hash(output.path_info).value
116 return output.hash_info.value
117
118 for stage in repo.stages:
119 for output in stage.outs:
120 if _exists(output):
121 yield_output = targets is None or any(
122 output.path_info.isin_or_eq(target) for target in targets
123 )
124
125 if yield_output:
126 yield _to_path(output), _to_checksum(output)
127
128 if output.is_dir_checksum and (
129 yield_output
130 or any(target.isin(output.path_info) for target in targets)
131 ):
132 yield from _dir_output_paths(repo_tree, output, targets)
133
134
135 def _dir_output_paths(repo_tree, output, targets=None):
136 from dvc.config import NoRemoteError
137
138 try:
139 for fname in repo_tree.walk_files(output.path_info):
140 if targets is None or any(
141 fname.isin_or_eq(target) for target in targets
142 ):
143 yield str(fname), repo_tree.get_file_hash(fname).value
144 except NoRemoteError:
145 logger.warning("dir cache entry for '%s' is missing", output)
146
147
148 def _filter_missing(repo, paths):
149 repo_tree = RepoTree(repo, stream=True)
150 for path in paths:
151 metadata = repo_tree.metadata(path)
152 if metadata.is_dvc:
153 out = metadata.outs[0]
154 if out.status().get(str(out)) == "not in cache":
155 yield path
156
157
158 def _targets_to_path_infos(repo, targets):
159 path_infos = []
160 missing = []
161
162 repo_tree = RepoTree(repo, stream=True)
163
164 for target in targets:
165 if repo_tree.exists(target):
166 path_infos.append(repo_tree.metadata(target).path_info)
167 else:
168 missing.append(target)
169
170 return path_infos, missing
171
[end of dvc/repo/diff.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dvc/repo/diff.py b/dvc/repo/diff.py
--- a/dvc/repo/diff.py
+++ b/dvc/repo/diff.py
@@ -148,11 +148,14 @@
def _filter_missing(repo, paths):
repo_tree = RepoTree(repo, stream=True)
for path in paths:
- metadata = repo_tree.metadata(path)
- if metadata.is_dvc:
- out = metadata.outs[0]
- if out.status().get(str(out)) == "not in cache":
- yield path
+ try:
+ metadata = repo_tree.metadata(path)
+ if metadata.is_dvc:
+ out = metadata.outs[0]
+ if out.status().get(str(out)) == "not in cache":
+ yield path
+ except FileNotFoundError:
+ pass
def _targets_to_path_infos(repo, targets):
| {"golden_diff": "diff --git a/dvc/repo/diff.py b/dvc/repo/diff.py\n--- a/dvc/repo/diff.py\n+++ b/dvc/repo/diff.py\n@@ -148,11 +148,14 @@\n def _filter_missing(repo, paths):\n repo_tree = RepoTree(repo, stream=True)\n for path in paths:\n- metadata = repo_tree.metadata(path)\n- if metadata.is_dvc:\n- out = metadata.outs[0]\n- if out.status().get(str(out)) == \"not in cache\":\n- yield path\n+ try:\n+ metadata = repo_tree.metadata(path)\n+ if metadata.is_dvc:\n+ out = metadata.outs[0]\n+ if out.status().get(str(out)) == \"not in cache\":\n+ yield path\n+ except FileNotFoundError:\n+ pass\n \n \n def _targets_to_path_infos(repo, targets):\n", "issue": "diff: unexpected error when diffing workspace after dvc remove\n# Bug Report\r\n\r\n## Description\r\n\r\n`dvc diff` will raise unexpected error with no other output if both the `.dvc` file and the original data file are removed from the workspace (i.e. after running `dvc remove`).\r\n\r\n```\r\n$ git status \u23ce\r\nOn branch master\r\nChanges not staged for commit:\r\n deleted: .gitignore\r\n deleted: foo.txt.dvc\r\n\r\nno changes added to commit\r\n\r\n$ dvc diff -v\r\n2020-12-28 15:43:46,270 DEBUG: Check for update is enabled.\r\n2020-12-28 15:43:46,584 ERROR: unexpected error\r\n------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File \"/Users/pmrowla/git/dvc/dvc/main.py\", line 90, in main\r\n ret = cmd.run()\r\n File \"/Users/pmrowla/git/dvc/dvc/command/diff.py\", line 131, in run\r\n diff = self.repo.diff(\r\n File \"/Users/pmrowla/git/dvc/dvc/repo/__init__.py\", line 53, in wrapper\r\n return f(repo, *args, **kwargs)\r\n File \"/Users/pmrowla/git/dvc/dvc/repo/diff.py\", line 60, in diff\r\n missing = sorted(_filter_missing(self, deleted_or_missing))\r\n File \"/Users/pmrowla/git/dvc/dvc/repo/diff.py\", line 151, in _filter_missing\r\n metadata = repo_tree.metadata(path)\r\n File \"/Users/pmrowla/git/dvc/dvc/tree/repo.py\", line 446, in metadata\r\n raise FileNotFoundError\r\nFileNotFoundError\r\n------------------------------------------------------------\r\n```\r\n\r\n### Reproduce\r\n\r\n```bash\r\n#!/bin/bash\r\n\r\nset -e\r\nset -x\r\n\r\nREPO=\"test_repo\"\r\n\r\nrm -rf $REPO\r\nmkdir $REPO\r\npushd $REPO\r\n\r\ngit init\r\ndvc init\r\necho \"foo\" > foo.txt\r\ndvc add foo.txt\r\ngit add .\r\ngit commit -m \"init\"\r\n\r\ndvc remove foo.txt.dvc\r\nrm foo.txt\r\ndvc diff -v\r\n\r\npopd\r\n```\r\n\r\nThis issue only affects workspace diff. If the changes after remove are `git commit`ed and then the two commits are `dvc diff`ed, the diff will work as expected. 
Issue can also be reproduced by doing `git rm <file>.dvc; rm <file>` instead of using `dvc remove`.\n", "before_files": [{"content": "import logging\nimport os\n\nfrom dvc.exceptions import PathMissingError\nfrom dvc.repo import locked\nfrom dvc.tree.local import LocalTree\nfrom dvc.tree.repo import RepoTree\n\nlogger = logging.getLogger(__name__)\n\n\n@locked\ndef diff(self, a_rev=\"HEAD\", b_rev=None, targets=None):\n \"\"\"\n By default, it compares the workspace with the last commit's tree.\n\n This implementation differs from `git diff` since DVC doesn't have\n the concept of `index`, but it keeps the same interface, thus,\n `dvc diff` would be the same as `dvc diff HEAD`.\n \"\"\"\n\n if self.scm.no_commits:\n return {}\n\n b_rev = b_rev if b_rev else \"workspace\"\n results = {}\n missing_targets = {}\n for rev in self.brancher(revs=[a_rev, b_rev]):\n if rev == \"workspace\" and rev != b_rev:\n # brancher always returns workspace, but we only need to compute\n # workspace paths/checksums if b_rev was None\n continue\n\n targets_path_infos = None\n if targets is not None:\n # convert targets to path_infos, and capture any missing targets\n targets_path_infos, missing_targets[rev] = _targets_to_path_infos(\n self, targets\n )\n\n results[rev] = _paths_checksums(self, targets_path_infos)\n\n if targets is not None:\n # check for overlapping missing targets between a_rev and b_rev\n for target in set(missing_targets[a_rev]) & set(\n missing_targets[b_rev]\n ):\n raise PathMissingError(target, self)\n\n old = results[a_rev]\n new = results[b_rev]\n\n # Compare paths between the old and new tree.\n # set() efficiently converts dict keys to a set\n added = sorted(set(new) - set(old))\n deleted_or_missing = set(old) - set(new)\n if b_rev == \"workspace\":\n # missing status is only applicable when diffing local workspace\n # against a commit\n missing = sorted(_filter_missing(self, deleted_or_missing))\n else:\n missing = []\n deleted = sorted(deleted_or_missing - set(missing))\n modified = sorted(set(old) & set(new))\n\n ret = {\n \"added\": [{\"path\": path, \"hash\": new[path]} for path in added],\n \"deleted\": [{\"path\": path, \"hash\": old[path]} for path in deleted],\n \"modified\": [\n {\"path\": path, \"hash\": {\"old\": old[path], \"new\": new[path]}}\n for path in modified\n if old[path] != new[path]\n ],\n \"not in cache\": [\n {\"path\": path, \"hash\": old[path]} for path in missing\n ],\n }\n\n return ret if any(ret.values()) else {}\n\n\ndef _paths_checksums(repo, targets):\n \"\"\"\n A dictionary of checksums addressed by relpaths collected from\n the current tree outputs.\n\n To help distinguish between a directory and a file output,\n the former one will come with a trailing slash in the path:\n\n directory: \"data/\"\n file: \"data\"\n \"\"\"\n\n return dict(_output_paths(repo, targets))\n\n\ndef _output_paths(repo, targets):\n repo_tree = RepoTree(repo, stream=True)\n on_working_tree = isinstance(repo.tree, LocalTree)\n\n def _exists(output):\n if on_working_tree:\n return output.exists\n return True\n\n def _to_path(output):\n return (\n str(output)\n if not output.is_dir_checksum\n else os.path.join(str(output), \"\")\n )\n\n def _to_checksum(output):\n if on_working_tree:\n return repo.cache.local.tree.get_hash(output.path_info).value\n return output.hash_info.value\n\n for stage in repo.stages:\n for output in stage.outs:\n if _exists(output):\n yield_output = targets is None or any(\n output.path_info.isin_or_eq(target) for target in targets\n )\n\n if 
yield_output:\n yield _to_path(output), _to_checksum(output)\n\n if output.is_dir_checksum and (\n yield_output\n or any(target.isin(output.path_info) for target in targets)\n ):\n yield from _dir_output_paths(repo_tree, output, targets)\n\n\ndef _dir_output_paths(repo_tree, output, targets=None):\n from dvc.config import NoRemoteError\n\n try:\n for fname in repo_tree.walk_files(output.path_info):\n if targets is None or any(\n fname.isin_or_eq(target) for target in targets\n ):\n yield str(fname), repo_tree.get_file_hash(fname).value\n except NoRemoteError:\n logger.warning(\"dir cache entry for '%s' is missing\", output)\n\n\ndef _filter_missing(repo, paths):\n repo_tree = RepoTree(repo, stream=True)\n for path in paths:\n metadata = repo_tree.metadata(path)\n if metadata.is_dvc:\n out = metadata.outs[0]\n if out.status().get(str(out)) == \"not in cache\":\n yield path\n\n\ndef _targets_to_path_infos(repo, targets):\n path_infos = []\n missing = []\n\n repo_tree = RepoTree(repo, stream=True)\n\n for target in targets:\n if repo_tree.exists(target):\n path_infos.append(repo_tree.metadata(target).path_info)\n else:\n missing.append(target)\n\n return path_infos, missing\n", "path": "dvc/repo/diff.py"}]} | 2,691 | 201 |
gh_patches_debug_29988 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-1942 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Every string character is being checked
As reported by @yetanotherjsontodatabaseexporter in #1939, strings are also sequences, so we are unnecessarily checking every character [here](https://github.com/open-telemetry/opentelemetry-python/blob/f11ed2f3bacb11d53a7a2b4837cf6308fa34cc71/opentelemetry-api/src/opentelemetry/attributes/__init__.py#L46).
</issue>
<code>
[start of opentelemetry-api/src/opentelemetry/attributes/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # type: ignore
15
16 import logging
17 import threading
18 from collections import OrderedDict
19 from collections.abc import MutableMapping
20 from types import MappingProxyType
21 from typing import MutableSequence, Optional, Sequence
22
23 from opentelemetry.util import types
24
25 _VALID_ATTR_VALUE_TYPES = (bool, str, int, float)
26
27
28 _logger = logging.getLogger(__name__)
29
30
31 def _is_valid_attribute_value(value: types.AttributeValue) -> bool:
32 """Checks if attribute value is valid.
33
34 An attribute value is valid if it is either:
35 - A primitive type: string, boolean, double precision floating
36 point (IEEE 754-1985) or integer.
37 - An array of primitive type values. The array MUST be homogeneous,
38 i.e. it MUST NOT contain values of different types.
39 """
40
41 if isinstance(value, Sequence):
42 if len(value) == 0:
43 return True
44
45 sequence_first_valid_type = None
46 for element in value:
47 if element is None:
48 continue
49 element_type = type(element)
50 if element_type not in _VALID_ATTR_VALUE_TYPES:
51 _logger.warning(
52 "Invalid type %s in attribute value sequence. Expected one of "
53 "%s or None",
54 element_type.__name__,
55 [
56 valid_type.__name__
57 for valid_type in _VALID_ATTR_VALUE_TYPES
58 ],
59 )
60 return False
61 # The type of the sequence must be homogeneous. The first non-None
62 # element determines the type of the sequence
63 if sequence_first_valid_type is None:
64 sequence_first_valid_type = element_type
65 elif not isinstance(element, sequence_first_valid_type):
66 _logger.warning(
67 "Mixed types %s and %s in attribute value sequence",
68 sequence_first_valid_type.__name__,
69 type(element).__name__,
70 )
71 return False
72
73 elif not isinstance(value, _VALID_ATTR_VALUE_TYPES):
74 _logger.warning(
75 "Invalid type %s for attribute value. Expected one of %s or a "
76 "sequence of those types",
77 type(value).__name__,
78 [valid_type.__name__ for valid_type in _VALID_ATTR_VALUE_TYPES],
79 )
80 return False
81 return True
82
83
84 def _filter_attributes(attributes: types.Attributes) -> None:
85 """Applies attribute validation rules and drops (key, value) pairs
86 that doesn't adhere to attributes specification.
87
88 https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/common.md#attributes.
89 """
90 if attributes:
91 for attr_key, attr_value in list(attributes.items()):
92 if not attr_key:
93 _logger.warning("invalid key `%s` (empty or null)", attr_key)
94 attributes.pop(attr_key)
95 continue
96
97 if _is_valid_attribute_value(attr_value):
98 if isinstance(attr_value, MutableSequence):
99 attributes[attr_key] = tuple(attr_value)
100 if isinstance(attr_value, bytes):
101 try:
102 attributes[attr_key] = attr_value.decode()
103 except ValueError:
104 attributes.pop(attr_key)
105 _logger.warning("Byte attribute could not be decoded.")
106 else:
107 attributes.pop(attr_key)
108
109
110 _DEFAULT_LIMIT = 128
111
112
113 class BoundedAttributes(MutableMapping):
114 """An ordered dict with a fixed max capacity.
115
116 Oldest elements are dropped when the dict is full and a new element is
117 added.
118 """
119
120 def __init__(
121 self,
122 maxlen: Optional[int] = _DEFAULT_LIMIT,
123 attributes: types.Attributes = None,
124 immutable: bool = True,
125 ):
126 if maxlen is not None:
127 if not isinstance(maxlen, int) or maxlen < 0:
128 raise ValueError(
129 "maxlen must be valid int greater or equal to 0"
130 )
131 self.maxlen = maxlen
132 self.dropped = 0
133 self._dict = OrderedDict() # type: OrderedDict
134 self._lock = threading.Lock() # type: threading.Lock
135 if attributes:
136 _filter_attributes(attributes)
137 for key, value in attributes.items():
138 self[key] = value
139 self._immutable = immutable
140
141 def __repr__(self):
142 return "{}({}, maxlen={})".format(
143 type(self).__name__, dict(self._dict), self.maxlen
144 )
145
146 def __getitem__(self, key):
147 return self._dict[key]
148
149 def __setitem__(self, key, value):
150 if getattr(self, "_immutable", False):
151 raise TypeError
152 with self._lock:
153 if self.maxlen is not None and self.maxlen == 0:
154 self.dropped += 1
155 return
156
157 if key in self._dict:
158 del self._dict[key]
159 elif self.maxlen is not None and len(self._dict) == self.maxlen:
160 del self._dict[next(iter(self._dict.keys()))]
161 self.dropped += 1
162 self._dict[key] = value
163
164 def __delitem__(self, key):
165 if getattr(self, "_immutable", False):
166 raise TypeError
167 with self._lock:
168 del self._dict[key]
169
170 def __iter__(self):
171 with self._lock:
172 return iter(self._dict.copy())
173
174 def __len__(self):
175 return len(self._dict)
176
177 def copy(self):
178 return self._dict.copy()
179
[end of opentelemetry-api/src/opentelemetry/attributes/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opentelemetry-api/src/opentelemetry/attributes/__init__.py b/opentelemetry-api/src/opentelemetry/attributes/__init__.py
--- a/opentelemetry-api/src/opentelemetry/attributes/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/attributes/__init__.py
@@ -17,7 +17,6 @@
import threading
from collections import OrderedDict
from collections.abc import MutableMapping
-from types import MappingProxyType
from typing import MutableSequence, Optional, Sequence
from opentelemetry.util import types
@@ -38,9 +37,10 @@
i.e. it MUST NOT contain values of different types.
"""
+ if isinstance(value, _VALID_ATTR_VALUE_TYPES):
+ return True
+
if isinstance(value, Sequence):
- if len(value) == 0:
- return True
sequence_first_valid_type = None
for element in value:
@@ -69,16 +69,15 @@
type(element).__name__,
)
return False
-
- elif not isinstance(value, _VALID_ATTR_VALUE_TYPES):
- _logger.warning(
- "Invalid type %s for attribute value. Expected one of %s or a "
- "sequence of those types",
- type(value).__name__,
- [valid_type.__name__ for valid_type in _VALID_ATTR_VALUE_TYPES],
- )
- return False
- return True
+ return True
+
+ _logger.warning(
+ "Invalid type %s for attribute value. Expected one of %s or a "
+ "sequence of those types",
+ type(value).__name__,
+ [valid_type.__name__ for valid_type in _VALID_ATTR_VALUE_TYPES],
+ )
+ return False
def _filter_attributes(attributes: types.Attributes) -> None:
| {"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/attributes/__init__.py b/opentelemetry-api/src/opentelemetry/attributes/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/attributes/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/attributes/__init__.py\n@@ -17,7 +17,6 @@\n import threading\n from collections import OrderedDict\n from collections.abc import MutableMapping\n-from types import MappingProxyType\n from typing import MutableSequence, Optional, Sequence\n \n from opentelemetry.util import types\n@@ -38,9 +37,10 @@\n i.e. it MUST NOT contain values of different types.\n \"\"\"\n \n+ if isinstance(value, _VALID_ATTR_VALUE_TYPES):\n+ return True\n+\n if isinstance(value, Sequence):\n- if len(value) == 0:\n- return True\n \n sequence_first_valid_type = None\n for element in value:\n@@ -69,16 +69,15 @@\n type(element).__name__,\n )\n return False\n-\n- elif not isinstance(value, _VALID_ATTR_VALUE_TYPES):\n- _logger.warning(\n- \"Invalid type %s for attribute value. Expected one of %s or a \"\n- \"sequence of those types\",\n- type(value).__name__,\n- [valid_type.__name__ for valid_type in _VALID_ATTR_VALUE_TYPES],\n- )\n- return False\n- return True\n+ return True\n+\n+ _logger.warning(\n+ \"Invalid type %s for attribute value. Expected one of %s or a \"\n+ \"sequence of those types\",\n+ type(value).__name__,\n+ [valid_type.__name__ for valid_type in _VALID_ATTR_VALUE_TYPES],\n+ )\n+ return False\n \n \n def _filter_attributes(attributes: types.Attributes) -> None:\n", "issue": "Every string character is being checked\nAs reported @yetanotherjsontodatabaseexporter in #1939, strings are also sequences. We are unnecessarily checking every character [here](https://github.com/open-telemetry/opentelemetry-python/blob/f11ed2f3bacb11d53a7a2b4837cf6308fa34cc71/opentelemetry-api/src/opentelemetry/attributes/__init__.py#L46).\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# type: ignore\n\nimport logging\nimport threading\nfrom collections import OrderedDict\nfrom collections.abc import MutableMapping\nfrom types import MappingProxyType\nfrom typing import MutableSequence, Optional, Sequence\n\nfrom opentelemetry.util import types\n\n_VALID_ATTR_VALUE_TYPES = (bool, str, int, float)\n\n\n_logger = logging.getLogger(__name__)\n\n\ndef _is_valid_attribute_value(value: types.AttributeValue) -> bool:\n \"\"\"Checks if attribute value is valid.\n\n An attribute value is valid if it is either:\n - A primitive type: string, boolean, double precision floating\n point (IEEE 754-1985) or integer.\n - An array of primitive type values. The array MUST be homogeneous,\n i.e. 
it MUST NOT contain values of different types.\n \"\"\"\n\n if isinstance(value, Sequence):\n if len(value) == 0:\n return True\n\n sequence_first_valid_type = None\n for element in value:\n if element is None:\n continue\n element_type = type(element)\n if element_type not in _VALID_ATTR_VALUE_TYPES:\n _logger.warning(\n \"Invalid type %s in attribute value sequence. Expected one of \"\n \"%s or None\",\n element_type.__name__,\n [\n valid_type.__name__\n for valid_type in _VALID_ATTR_VALUE_TYPES\n ],\n )\n return False\n # The type of the sequence must be homogeneous. The first non-None\n # element determines the type of the sequence\n if sequence_first_valid_type is None:\n sequence_first_valid_type = element_type\n elif not isinstance(element, sequence_first_valid_type):\n _logger.warning(\n \"Mixed types %s and %s in attribute value sequence\",\n sequence_first_valid_type.__name__,\n type(element).__name__,\n )\n return False\n\n elif not isinstance(value, _VALID_ATTR_VALUE_TYPES):\n _logger.warning(\n \"Invalid type %s for attribute value. Expected one of %s or a \"\n \"sequence of those types\",\n type(value).__name__,\n [valid_type.__name__ for valid_type in _VALID_ATTR_VALUE_TYPES],\n )\n return False\n return True\n\n\ndef _filter_attributes(attributes: types.Attributes) -> None:\n \"\"\"Applies attribute validation rules and drops (key, value) pairs\n that doesn't adhere to attributes specification.\n\n https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/common.md#attributes.\n \"\"\"\n if attributes:\n for attr_key, attr_value in list(attributes.items()):\n if not attr_key:\n _logger.warning(\"invalid key `%s` (empty or null)\", attr_key)\n attributes.pop(attr_key)\n continue\n\n if _is_valid_attribute_value(attr_value):\n if isinstance(attr_value, MutableSequence):\n attributes[attr_key] = tuple(attr_value)\n if isinstance(attr_value, bytes):\n try:\n attributes[attr_key] = attr_value.decode()\n except ValueError:\n attributes.pop(attr_key)\n _logger.warning(\"Byte attribute could not be decoded.\")\n else:\n attributes.pop(attr_key)\n\n\n_DEFAULT_LIMIT = 128\n\n\nclass BoundedAttributes(MutableMapping):\n \"\"\"An ordered dict with a fixed max capacity.\n\n Oldest elements are dropped when the dict is full and a new element is\n added.\n \"\"\"\n\n def __init__(\n self,\n maxlen: Optional[int] = _DEFAULT_LIMIT,\n attributes: types.Attributes = None,\n immutable: bool = True,\n ):\n if maxlen is not None:\n if not isinstance(maxlen, int) or maxlen < 0:\n raise ValueError(\n \"maxlen must be valid int greater or equal to 0\"\n )\n self.maxlen = maxlen\n self.dropped = 0\n self._dict = OrderedDict() # type: OrderedDict\n self._lock = threading.Lock() # type: threading.Lock\n if attributes:\n _filter_attributes(attributes)\n for key, value in attributes.items():\n self[key] = value\n self._immutable = immutable\n\n def __repr__(self):\n return \"{}({}, maxlen={})\".format(\n type(self).__name__, dict(self._dict), self.maxlen\n )\n\n def __getitem__(self, key):\n return self._dict[key]\n\n def __setitem__(self, key, value):\n if getattr(self, \"_immutable\", False):\n raise TypeError\n with self._lock:\n if self.maxlen is not None and self.maxlen == 0:\n self.dropped += 1\n return\n\n if key in self._dict:\n del self._dict[key]\n elif self.maxlen is not None and len(self._dict) == self.maxlen:\n del self._dict[next(iter(self._dict.keys()))]\n self.dropped += 1\n self._dict[key] = value\n\n def __delitem__(self, key):\n if getattr(self, 
\"_immutable\", False):\n raise TypeError\n with self._lock:\n del self._dict[key]\n\n def __iter__(self):\n with self._lock:\n return iter(self._dict.copy())\n\n def __len__(self):\n return len(self._dict)\n\n def copy(self):\n return self._dict.copy()\n", "path": "opentelemetry-api/src/opentelemetry/attributes/__init__.py"}]} | 2,356 | 400 |
gh_patches_debug_41073 | rasdani/github-patches | git_diff | PaddlePaddle__PaddleSeg-1747 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
init_weight is never executed in paddleseg/models/hrnet_contrast.py
In paddleseg/models/hrnet_contrast.py, __init__() does not call init_weight, so hrnet_w48_contrast cannot load the complete pretrained model.
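A framework-free sketch (class names are hypothetical, and the `loaded` flag stands in for `utils.load_entire_model`) of why the pretrained weights are silently skipped unless `__init__` ends with `self.init_weight()`:
```python
class ContrastModelBuggy:
    def __init__(self, pretrained=None):
        self.pretrained = pretrained
        self.loaded = False
        # init_weight() is never invoked here, so `pretrained` is ignored.

    def init_weight(self):
        if self.pretrained is not None:
            self.loaded = True  # stand-in for utils.load_entire_model(self, ...)


class ContrastModelFixed(ContrastModelBuggy):
    def __init__(self, pretrained=None):
        super().__init__(pretrained)
        self.init_weight()  # the one-line fix


print(ContrastModelBuggy(pretrained="hrnet_w48.pdparams").loaded)  # False
print(ContrastModelFixed(pretrained="hrnet_w48.pdparams").loaded)  # True
```
The diff further below does exactly this: it keeps the `pretrained` assignment and adds a `self.init_weight()` call at the end of `__init__`.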
</issue>
<code>
[start of paddleseg/models/hrnet_contrast.py]
1 # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import paddle
16 import paddle.nn as nn
17 import paddle.nn.functional as F
18
19 from paddleseg.cvlibs import manager
20 from paddleseg.models import layers
21 from paddleseg.utils import utils
22
23
24 @manager.MODELS.add_component
25 class HRNetW48Contrast(nn.Layer):
26 """
27 The HRNetW48Contrast implementation based on PaddlePaddle.
28
29 The original article refers to
30 Wenguan Wang, Tianfei Zhou, et al. "Exploring Cross-Image Pixel Contrast for Semantic Segmentation"
31 (https://arxiv.org/abs/2101.11939).
32
33 Args:
34 in_channels (int): The output dimensions of backbone.
35 num_classes (int): The unique number of target classes.
36 backbone (Paddle.nn.Layer): Backbone network, currently support HRNet_W48.
37 drop_prob (float): The probability of dropout.
38 proj_dim (int): The projection dimensions.
39 align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,
40 e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
41 pretrained (str, optional): The path or url of pretrained model. Default: None.
42 """
43 def __init__(self,
44 in_channels,
45 num_classes,
46 backbone,
47 drop_prob,
48 proj_dim,
49 align_corners=False,
50 pretrained=None):
51 super().__init__()
52 self.in_channels = in_channels
53 self.backbone = backbone
54 self.num_classes = num_classes
55 self.proj_dim = proj_dim
56 self.align_corners = align_corners
57 self.pretrained = pretrained
58
59 self.cls_head = nn.Sequential(
60 layers.ConvBNReLU(in_channels,
61 in_channels,
62 kernel_size=3,
63 stride=1,
64 padding=1),
65 nn.Dropout2D(drop_prob),
66 nn.Conv2D(in_channels,
67 num_classes,
68 kernel_size=1,
69 stride=1,
70 bias_attr=False),
71 )
72 self.proj_head = ProjectionHead(dim_in=in_channels,
73 proj_dim=self.proj_dim)
74
75 def init_weight(self):
76 if self.pretrained is not None:
77 utils.load_entire_model(self, self.pretrained)
78
79 def forward(self, x):
80 feats = self.backbone(x)[0]
81 out = self.cls_head(feats)
82 logit_list = []
83 if self.training:
84 emb = self.proj_head(feats)
85 logit_list.append(
86 F.interpolate(out,
87 paddle.shape(x)[2:],
88 mode='bilinear',
89 align_corners=self.align_corners))
90 logit_list.append({'seg': out, 'embed': emb})
91 else:
92 logit_list.append(
93 F.interpolate(out,
94 paddle.shape(x)[2:],
95 mode='bilinear',
96 align_corners=self.align_corners))
97 return logit_list
98
99
100 class ProjectionHead(nn.Layer):
101 """
102 The projection head used by contrast learning.
103 Args:
104 dim_in (int): The dimensions of input features.
105 proj_dim (int, optional): The output dimensions of projection head. Default: 256.
106 proj (str, optional): The type of projection head, only support 'linear' and 'convmlp'. Default: 'convmlp'.
107 """
108 def __init__(self, dim_in, proj_dim=256, proj='convmlp'):
109 super(ProjectionHead, self).__init__()
110 if proj == 'linear':
111 self.proj = nn.Conv2D(dim_in, proj_dim, kernel_size=1)
112 elif proj == 'convmlp':
113 self.proj = nn.Sequential(
114 layers.ConvBNReLU(dim_in, dim_in, kernel_size=1),
115 nn.Conv2D(dim_in, proj_dim, kernel_size=1),
116 )
117 else:
118 raise ValueError(
119 "The type of project head only support 'linear' and 'convmlp', but got {}."
120 .format(proj))
121
122 def forward(self, x):
123 return F.normalize(self.proj(x), p=2, axis=1)
124
[end of paddleseg/models/hrnet_contrast.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/paddleseg/models/hrnet_contrast.py b/paddleseg/models/hrnet_contrast.py
--- a/paddleseg/models/hrnet_contrast.py
+++ b/paddleseg/models/hrnet_contrast.py
@@ -40,6 +40,7 @@
e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
pretrained (str, optional): The path or url of pretrained model. Default: None.
"""
+
def __init__(self,
in_channels,
num_classes,
@@ -54,23 +55,23 @@
self.num_classes = num_classes
self.proj_dim = proj_dim
self.align_corners = align_corners
- self.pretrained = pretrained
self.cls_head = nn.Sequential(
- layers.ConvBNReLU(in_channels,
- in_channels,
- kernel_size=3,
- stride=1,
- padding=1),
+ layers.ConvBNReLU(
+ in_channels, in_channels, kernel_size=3, stride=1, padding=1),
nn.Dropout2D(drop_prob),
- nn.Conv2D(in_channels,
- num_classes,
- kernel_size=1,
- stride=1,
- bias_attr=False),
+ nn.Conv2D(
+ in_channels,
+ num_classes,
+ kernel_size=1,
+ stride=1,
+ bias_attr=False),
)
- self.proj_head = ProjectionHead(dim_in=in_channels,
- proj_dim=self.proj_dim)
+ self.proj_head = ProjectionHead(
+ dim_in=in_channels, proj_dim=self.proj_dim)
+
+ self.pretrained = pretrained
+ self.init_weight()
def init_weight(self):
if self.pretrained is not None:
@@ -83,17 +84,19 @@
if self.training:
emb = self.proj_head(feats)
logit_list.append(
- F.interpolate(out,
- paddle.shape(x)[2:],
- mode='bilinear',
- align_corners=self.align_corners))
+ F.interpolate(
+ out,
+ paddle.shape(x)[2:],
+ mode='bilinear',
+ align_corners=self.align_corners))
logit_list.append({'seg': out, 'embed': emb})
else:
logit_list.append(
- F.interpolate(out,
- paddle.shape(x)[2:],
- mode='bilinear',
- align_corners=self.align_corners))
+ F.interpolate(
+ out,
+ paddle.shape(x)[2:],
+ mode='bilinear',
+ align_corners=self.align_corners))
return logit_list
@@ -105,6 +108,7 @@
proj_dim (int, optional): The output dimensions of projection head. Default: 256.
proj (str, optional): The type of projection head, only support 'linear' and 'convmlp'. Default: 'convmlp'.
"""
+
def __init__(self, dim_in, proj_dim=256, proj='convmlp'):
super(ProjectionHead, self).__init__()
if proj == 'linear':
| {"golden_diff": "diff --git a/paddleseg/models/hrnet_contrast.py b/paddleseg/models/hrnet_contrast.py\n--- a/paddleseg/models/hrnet_contrast.py\n+++ b/paddleseg/models/hrnet_contrast.py\n@@ -40,6 +40,7 @@\n e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.\n pretrained (str, optional): The path or url of pretrained model. Default: None.\n \"\"\"\n+\n def __init__(self,\n in_channels,\n num_classes,\n@@ -54,23 +55,23 @@\n self.num_classes = num_classes\n self.proj_dim = proj_dim\n self.align_corners = align_corners\n- self.pretrained = pretrained\n \n self.cls_head = nn.Sequential(\n- layers.ConvBNReLU(in_channels,\n- in_channels,\n- kernel_size=3,\n- stride=1,\n- padding=1),\n+ layers.ConvBNReLU(\n+ in_channels, in_channels, kernel_size=3, stride=1, padding=1),\n nn.Dropout2D(drop_prob),\n- nn.Conv2D(in_channels,\n- num_classes,\n- kernel_size=1,\n- stride=1,\n- bias_attr=False),\n+ nn.Conv2D(\n+ in_channels,\n+ num_classes,\n+ kernel_size=1,\n+ stride=1,\n+ bias_attr=False),\n )\n- self.proj_head = ProjectionHead(dim_in=in_channels,\n- proj_dim=self.proj_dim)\n+ self.proj_head = ProjectionHead(\n+ dim_in=in_channels, proj_dim=self.proj_dim)\n+\n+ self.pretrained = pretrained\n+ self.init_weight()\n \n def init_weight(self):\n if self.pretrained is not None:\n@@ -83,17 +84,19 @@\n if self.training:\n emb = self.proj_head(feats)\n logit_list.append(\n- F.interpolate(out,\n- paddle.shape(x)[2:],\n- mode='bilinear',\n- align_corners=self.align_corners))\n+ F.interpolate(\n+ out,\n+ paddle.shape(x)[2:],\n+ mode='bilinear',\n+ align_corners=self.align_corners))\n logit_list.append({'seg': out, 'embed': emb})\n else:\n logit_list.append(\n- F.interpolate(out,\n- paddle.shape(x)[2:],\n- mode='bilinear',\n- align_corners=self.align_corners))\n+ F.interpolate(\n+ out,\n+ paddle.shape(x)[2:],\n+ mode='bilinear',\n+ align_corners=self.align_corners))\n return logit_list\n \n \n@@ -105,6 +108,7 @@\n proj_dim (int, optional): The output dimensions of projection head. Default: 256.\n proj (str, optional): The type of projection head, only support 'linear' and 'convmlp'. Default: 'convmlp'.\n \"\"\"\n+\n def __init__(self, dim_in, proj_dim=256, proj='convmlp'):\n super(ProjectionHead, self).__init__()\n if proj == 'linear':\n", "issue": "paddleseg/models/hrnet_contrast.py \u4e2d\u6ca1\u6709\u6267\u884c init_weight\npaddleseg/models/hrnet_contrast.py \u4e2d__init__()\u6ca1\u6709\u6267\u884c init_weight\uff0c\u5bfc\u81f4hrnet_w48_contrast \u6ca1\u6cd5\u52a0\u8f7d\u5b8c\u6574\u7684\u6a21\u578b\n", "before_files": [{"content": "# Copyright (c) 2021 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\n\nfrom paddleseg.cvlibs import manager\nfrom paddleseg.models import layers\nfrom paddleseg.utils import utils\n\n\[email protected]_component\nclass HRNetW48Contrast(nn.Layer):\n \"\"\"\n The HRNetW48Contrast implementation based on PaddlePaddle.\n\n The original article refers to\n Wenguan Wang, Tianfei Zhou, et al. \"Exploring Cross-Image Pixel Contrast for Semantic Segmentation\"\n (https://arxiv.org/abs/2101.11939).\n\n Args:\n in_channels (int): The output dimensions of backbone.\n num_classes (int): The unique number of target classes.\n backbone (Paddle.nn.Layer): Backbone network, currently support HRNet_W48.\n drop_prob (float): The probability of dropout.\n proj_dim (int): The projection dimensions.\n align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,\n e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.\n pretrained (str, optional): The path or url of pretrained model. Default: None.\n \"\"\"\n def __init__(self,\n in_channels,\n num_classes,\n backbone,\n drop_prob,\n proj_dim,\n align_corners=False,\n pretrained=None):\n super().__init__()\n self.in_channels = in_channels\n self.backbone = backbone\n self.num_classes = num_classes\n self.proj_dim = proj_dim\n self.align_corners = align_corners\n self.pretrained = pretrained\n\n self.cls_head = nn.Sequential(\n layers.ConvBNReLU(in_channels,\n in_channels,\n kernel_size=3,\n stride=1,\n padding=1),\n nn.Dropout2D(drop_prob),\n nn.Conv2D(in_channels,\n num_classes,\n kernel_size=1,\n stride=1,\n bias_attr=False),\n )\n self.proj_head = ProjectionHead(dim_in=in_channels,\n proj_dim=self.proj_dim)\n\n def init_weight(self):\n if self.pretrained is not None:\n utils.load_entire_model(self, self.pretrained)\n\n def forward(self, x):\n feats = self.backbone(x)[0]\n out = self.cls_head(feats)\n logit_list = []\n if self.training:\n emb = self.proj_head(feats)\n logit_list.append(\n F.interpolate(out,\n paddle.shape(x)[2:],\n mode='bilinear',\n align_corners=self.align_corners))\n logit_list.append({'seg': out, 'embed': emb})\n else:\n logit_list.append(\n F.interpolate(out,\n paddle.shape(x)[2:],\n mode='bilinear',\n align_corners=self.align_corners))\n return logit_list\n\n\nclass ProjectionHead(nn.Layer):\n \"\"\"\n The projection head used by contrast learning.\n Args:\n dim_in (int): The dimensions of input features.\n proj_dim (int, optional): The output dimensions of projection head. Default: 256.\n proj (str, optional): The type of projection head, only support 'linear' and 'convmlp'. 
Default: 'convmlp'.\n \"\"\"\n def __init__(self, dim_in, proj_dim=256, proj='convmlp'):\n super(ProjectionHead, self).__init__()\n if proj == 'linear':\n self.proj = nn.Conv2D(dim_in, proj_dim, kernel_size=1)\n elif proj == 'convmlp':\n self.proj = nn.Sequential(\n layers.ConvBNReLU(dim_in, dim_in, kernel_size=1),\n nn.Conv2D(dim_in, proj_dim, kernel_size=1),\n )\n else:\n raise ValueError(\n \"The type of project head only support 'linear' and 'convmlp', but got {}.\"\n .format(proj))\n\n def forward(self, x):\n return F.normalize(self.proj(x), p=2, axis=1)\n", "path": "paddleseg/models/hrnet_contrast.py"}]} | 1,880 | 701 |
gh_patches_debug_13422 | rasdani/github-patches | git_diff | urllib3__urllib3-1399 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Interrupted system call while profiling with plop
Using Python 2.7.12, requests 2.19.0, urllib3 1.23 and using [plop](https://github.com/bdarnell/plop) for profiling, I'm intermittently hitting this stack trace in long-running code:
```
File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/requests/sessions.py", line 525, in get
return self.request('GET', url, **kwargs)
File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/requests/sessions.py", line 512, in request
resp = self.send(prep, **send_kwargs)
File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/requests/sessions.py", line 622, in send
r = adapter.send(request, **kwargs)
File "/home/bmerry/work/sdp/git/katdal/katdal/chunkstore_s3.py", line 56, in send
return super(_TimeoutHTTPAdapter, self).send(request, stream, timeout, *args, **kwargs)
File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/requests/adapters.py", line 445, in send
timeout=timeout
File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/urllib3/connectionpool.py", line 588, in urlopen
conn = self._get_conn(timeout=pool_timeout)
File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/urllib3/connectionpool.py", line 239, in _get_conn
if conn and is_connection_dropped(conn):
File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/urllib3/util/connection.py", line 23, in is_connection_dropped
return wait_for_read(sock, timeout=0.0)
File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/urllib3/util/wait.py", line 146, in wait_for_read
return wait_for_socket(sock, read=True, timeout=timeout)
File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/urllib3/util/wait.py", line 107, in poll_wait_for_socket
return bool(_retry_on_intr(do_poll, timeout))
File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/urllib3/util/wait.py", line 47, in _retry_on_intr
return fn(timeout)
File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/urllib3/util/wait.py", line 105, in do_poll
return poll_obj.poll(t)
select.error: (4, 'Interrupted system call')
Profiling timer expired
```
Looking at the implementation of `_retry_on_intr` for older Pythons, it has this special case:
```python
if timeout is not None and timeout <= 0:
return fn(timeout)
```
which in turn seems to apply to the call stack above (see is_connection_dropped, which passes a timeout of 0.0). So apparently there are cases where poll can fail with EINTR even with a zero timeout. FWIW, I'm running Ubuntu 16.04 and Linux 4.4.0-116-generic. I'll try commenting out that fast path and doing some testing overnight to confirm that that is the problem.
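For reference, a minimal sketch (assuming pre-3.5 Python semantics, where interrupted system calls raise instead of being retried automatically; this is not the library's actual code) of what dropping the zero-timeout fast path amounts to:
```python
import errno
import select


def retry_on_eintr(fn, timeout):
    # No special case for timeout <= 0: even poll(0) can be interrupted by a
    # profiling signal such as SIGPROF, so retry in that case too. Deadline
    # bookkeeping for positive timeouts is omitted for brevity.
    while True:
        try:
            return fn(timeout)
        except (OSError, select.error) as exc:
            if exc.args[0] != errno.EINTR:
                raise


poll_obj = select.poll()
print(retry_on_eintr(poll_obj.poll, 0))  # [] on an idle poll object
```
The eventual fix below simply removes the `timeout <= 0` fast path so the existing deadline loop handles EINTR for zero timeouts as well.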
I don't yet have a minimal reproducible example, but I'll work on it (my first attempt at just banging on some URL in a loop hasn't worked). I wanted to file this before I forgot.
</issue>
<code>
[start of urllib3/util/wait.py]
1 import errno
2 from functools import partial
3 import select
4 import sys
5 try:
6 from time import monotonic
7 except ImportError:
8 from time import time as monotonic
9
10 __all__ = ["NoWayToWaitForSocketError", "wait_for_read", "wait_for_write"]
11
12
13 class NoWayToWaitForSocketError(Exception):
14 pass
15
16
17 # How should we wait on sockets?
18 #
19 # There are two types of APIs you can use for waiting on sockets: the fancy
20 # modern stateful APIs like epoll/kqueue, and the older stateless APIs like
21 # select/poll. The stateful APIs are more efficient when you have a lots of
22 # sockets to keep track of, because you can set them up once and then use them
23 # lots of times. But we only ever want to wait on a single socket at a time
24 # and don't want to keep track of state, so the stateless APIs are actually
25 # more efficient. So we want to use select() or poll().
26 #
27 # Now, how do we choose between select() and poll()? On traditional Unixes,
28 # select() has a strange calling convention that makes it slow, or fail
29 # altogether, for high-numbered file descriptors. The point of poll() is to fix
30 # that, so on Unixes, we prefer poll().
31 #
32 # On Windows, there is no poll() (or at least Python doesn't provide a wrapper
33 # for it), but that's OK, because on Windows, select() doesn't have this
34 # strange calling convention; plain select() works fine.
35 #
36 # So: on Windows we use select(), and everywhere else we use poll(). We also
37 # fall back to select() in case poll() is somehow broken or missing.
38
39 if sys.version_info >= (3, 5):
40 # Modern Python, that retries syscalls by default
41 def _retry_on_intr(fn, timeout):
42 return fn(timeout)
43 else:
44 # Old and broken Pythons.
45 def _retry_on_intr(fn, timeout):
46 if timeout is not None and timeout <= 0:
47 return fn(timeout)
48
49 if timeout is None:
50 deadline = float("inf")
51 else:
52 deadline = monotonic() + timeout
53
54 while True:
55 try:
56 return fn(timeout)
57 # OSError for 3 <= pyver < 3.5, select.error for pyver <= 2.7
58 except (OSError, select.error) as e:
59 # 'e.args[0]' incantation works for both OSError and select.error
60 if e.args[0] != errno.EINTR:
61 raise
62 else:
63 timeout = deadline - monotonic()
64 if timeout < 0:
65 timeout = 0
66 if timeout == float("inf"):
67 timeout = None
68 continue
69
70
71 def select_wait_for_socket(sock, read=False, write=False, timeout=None):
72 if not read and not write:
73 raise RuntimeError("must specify at least one of read=True, write=True")
74 rcheck = []
75 wcheck = []
76 if read:
77 rcheck.append(sock)
78 if write:
79 wcheck.append(sock)
80 # When doing a non-blocking connect, most systems signal success by
81 # marking the socket writable. Windows, though, signals success by marked
82 # it as "exceptional". We paper over the difference by checking the write
83 # sockets for both conditions. (The stdlib selectors module does the same
84 # thing.)
85 fn = partial(select.select, rcheck, wcheck, wcheck)
86 rready, wready, xready = _retry_on_intr(fn, timeout)
87 return bool(rready or wready or xready)
88
89
90 def poll_wait_for_socket(sock, read=False, write=False, timeout=None):
91 if not read and not write:
92 raise RuntimeError("must specify at least one of read=True, write=True")
93 mask = 0
94 if read:
95 mask |= select.POLLIN
96 if write:
97 mask |= select.POLLOUT
98 poll_obj = select.poll()
99 poll_obj.register(sock, mask)
100
101 # For some reason, poll() takes timeout in milliseconds
102 def do_poll(t):
103 if t is not None:
104 t *= 1000
105 return poll_obj.poll(t)
106
107 return bool(_retry_on_intr(do_poll, timeout))
108
109
110 def null_wait_for_socket(*args, **kwargs):
111 raise NoWayToWaitForSocketError("no select-equivalent available")
112
113
114 def _have_working_poll():
115 # Apparently some systems have a select.poll that fails as soon as you try
116 # to use it, either due to strange configuration or broken monkeypatching
117 # from libraries like eventlet/greenlet.
118 try:
119 poll_obj = select.poll()
120 poll_obj.poll(0)
121 except (AttributeError, OSError):
122 return False
123 else:
124 return True
125
126
127 def wait_for_socket(*args, **kwargs):
128 # We delay choosing which implementation to use until the first time we're
129 # called. We could do it at import time, but then we might make the wrong
130 # decision if someone goes wild with monkeypatching select.poll after
131 # we're imported.
132 global wait_for_socket
133 if _have_working_poll():
134 wait_for_socket = poll_wait_for_socket
135 elif hasattr(select, "select"):
136 wait_for_socket = select_wait_for_socket
137 else: # Platform-specific: Appengine.
138 wait_for_socket = null_wait_for_socket
139 return wait_for_socket(*args, **kwargs)
140
141
142 def wait_for_read(sock, timeout=None):
143 """ Waits for reading to be available on a given socket.
144 Returns True if the socket is readable, or False if the timeout expired.
145 """
146 return wait_for_socket(sock, read=True, timeout=timeout)
147
148
149 def wait_for_write(sock, timeout=None):
150 """ Waits for writing to be available on a given socket.
151 Returns True if the socket is readable, or False if the timeout expired.
152 """
153 return wait_for_socket(sock, write=True, timeout=timeout)
154
[end of urllib3/util/wait.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/urllib3/util/wait.py b/urllib3/util/wait.py
--- a/urllib3/util/wait.py
+++ b/urllib3/util/wait.py
@@ -43,9 +43,6 @@
else:
# Old and broken Pythons.
def _retry_on_intr(fn, timeout):
- if timeout is not None and timeout <= 0:
- return fn(timeout)
-
if timeout is None:
deadline = float("inf")
else:
@@ -117,7 +114,7 @@
# from libraries like eventlet/greenlet.
try:
poll_obj = select.poll()
- poll_obj.poll(0)
+ _retry_on_intr(poll_obj.poll, 0)
except (AttributeError, OSError):
return False
else:
| {"golden_diff": "diff --git a/urllib3/util/wait.py b/urllib3/util/wait.py\n--- a/urllib3/util/wait.py\n+++ b/urllib3/util/wait.py\n@@ -43,9 +43,6 @@\n else:\n # Old and broken Pythons.\n def _retry_on_intr(fn, timeout):\n- if timeout is not None and timeout <= 0:\n- return fn(timeout)\n-\n if timeout is None:\n deadline = float(\"inf\")\n else:\n@@ -117,7 +114,7 @@\n # from libraries like eventlet/greenlet.\n try:\n poll_obj = select.poll()\n- poll_obj.poll(0)\n+ _retry_on_intr(poll_obj.poll, 0)\n except (AttributeError, OSError):\n return False\n else:\n", "issue": "Interrupted system call while profiling with plop\nUsing Python 2.7.12, requests 2.19.0, urllib3 1.23 and using [plop](https://github.com/bdarnell/plop) for profiling, I'm intermittently hitting this stack trace in long-running code:\r\n```\r\n File \"/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/requests/sessions.py\", line 525, in get\r\n return self.request('GET', url, **kwargs)\r\n File \"/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/requests/sessions.py\", line 512, in request\r\n resp = self.send(prep, **send_kwargs)\r\n File \"/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/requests/sessions.py\", line 622, in send\r\n r = adapter.send(request, **kwargs)\r\n File \"/home/bmerry/work/sdp/git/katdal/katdal/chunkstore_s3.py\", line 56, in send\r\n return super(_TimeoutHTTPAdapter, self).send(request, stream, timeout, *args, **kwargs)\r\n File \"/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/requests/adapters.py\", line 445, in send\r\n timeout=timeout\r\n File \"/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/urllib3/connectionpool.py\", line 588, in urlopen\r\n conn = self._get_conn(timeout=pool_timeout)\r\n File \"/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/urllib3/connectionpool.py\", line 239, in _get_conn\r\n if conn and is_connection_dropped(conn):\r\n File \"/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/urllib3/util/connection.py\", line 23, in is_connection_dropped\r\n return wait_for_read(sock, timeout=0.0)\r\n File \"/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/urllib3/util/wait.py\", line 146, in wait_for_read\r\n return wait_for_socket(sock, read=True, timeout=timeout)\r\n File \"/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/urllib3/util/wait.py\", line 107, in poll_wait_for_socket\r\n return bool(_retry_on_intr(do_poll, timeout))\r\n File \"/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/urllib3/util/wait.py\", line 47, in _retry_on_intr\r\n return fn(timeout)\r\n File \"/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/urllib3/util/wait.py\", line 105, in do_poll\r\n return poll_obj.poll(t)\r\nselect.error: (4, 'Interrupted system call')\r\nProfiling timer expired\r\n```\r\n\r\nLooking at the implementation of `_retry_on_intr` for older Pythons, it has this special case:\r\n```python\r\n if timeout is not None and timeout <= 0:\r\n return fn(timeout)\r\n```\r\nwhich in turn seems to apply to the call stack above (see is_connection_dropped, which passes a timeout of 0.0). So apparently there are cases where poll can fail with EINTR even with a zero timeout. FWIW, I'm running Ubuntu 16.04 and Linux 4.4.0-116-generic. 
I'll try commenting out that fast path and doing some testing overnight to confirm that that is the problem.\r\n\r\nI don't yet had a minimal reproducible example, but I'll work on it (my first attempt of just banging on some URL in a loop hasn't worked). I wanted to file this before I forgot.\n", "before_files": [{"content": "import errno\nfrom functools import partial\nimport select\nimport sys\ntry:\n from time import monotonic\nexcept ImportError:\n from time import time as monotonic\n\n__all__ = [\"NoWayToWaitForSocketError\", \"wait_for_read\", \"wait_for_write\"]\n\n\nclass NoWayToWaitForSocketError(Exception):\n pass\n\n\n# How should we wait on sockets?\n#\n# There are two types of APIs you can use for waiting on sockets: the fancy\n# modern stateful APIs like epoll/kqueue, and the older stateless APIs like\n# select/poll. The stateful APIs are more efficient when you have a lots of\n# sockets to keep track of, because you can set them up once and then use them\n# lots of times. But we only ever want to wait on a single socket at a time\n# and don't want to keep track of state, so the stateless APIs are actually\n# more efficient. So we want to use select() or poll().\n#\n# Now, how do we choose between select() and poll()? On traditional Unixes,\n# select() has a strange calling convention that makes it slow, or fail\n# altogether, for high-numbered file descriptors. The point of poll() is to fix\n# that, so on Unixes, we prefer poll().\n#\n# On Windows, there is no poll() (or at least Python doesn't provide a wrapper\n# for it), but that's OK, because on Windows, select() doesn't have this\n# strange calling convention; plain select() works fine.\n#\n# So: on Windows we use select(), and everywhere else we use poll(). We also\n# fall back to select() in case poll() is somehow broken or missing.\n\nif sys.version_info >= (3, 5):\n # Modern Python, that retries syscalls by default\n def _retry_on_intr(fn, timeout):\n return fn(timeout)\nelse:\n # Old and broken Pythons.\n def _retry_on_intr(fn, timeout):\n if timeout is not None and timeout <= 0:\n return fn(timeout)\n\n if timeout is None:\n deadline = float(\"inf\")\n else:\n deadline = monotonic() + timeout\n\n while True:\n try:\n return fn(timeout)\n # OSError for 3 <= pyver < 3.5, select.error for pyver <= 2.7\n except (OSError, select.error) as e:\n # 'e.args[0]' incantation works for both OSError and select.error\n if e.args[0] != errno.EINTR:\n raise\n else:\n timeout = deadline - monotonic()\n if timeout < 0:\n timeout = 0\n if timeout == float(\"inf\"):\n timeout = None\n continue\n\n\ndef select_wait_for_socket(sock, read=False, write=False, timeout=None):\n if not read and not write:\n raise RuntimeError(\"must specify at least one of read=True, write=True\")\n rcheck = []\n wcheck = []\n if read:\n rcheck.append(sock)\n if write:\n wcheck.append(sock)\n # When doing a non-blocking connect, most systems signal success by\n # marking the socket writable. Windows, though, signals success by marked\n # it as \"exceptional\". We paper over the difference by checking the write\n # sockets for both conditions. 
(The stdlib selectors module does the same\n # thing.)\n fn = partial(select.select, rcheck, wcheck, wcheck)\n rready, wready, xready = _retry_on_intr(fn, timeout)\n return bool(rready or wready or xready)\n\n\ndef poll_wait_for_socket(sock, read=False, write=False, timeout=None):\n if not read and not write:\n raise RuntimeError(\"must specify at least one of read=True, write=True\")\n mask = 0\n if read:\n mask |= select.POLLIN\n if write:\n mask |= select.POLLOUT\n poll_obj = select.poll()\n poll_obj.register(sock, mask)\n\n # For some reason, poll() takes timeout in milliseconds\n def do_poll(t):\n if t is not None:\n t *= 1000\n return poll_obj.poll(t)\n\n return bool(_retry_on_intr(do_poll, timeout))\n\n\ndef null_wait_for_socket(*args, **kwargs):\n raise NoWayToWaitForSocketError(\"no select-equivalent available\")\n\n\ndef _have_working_poll():\n # Apparently some systems have a select.poll that fails as soon as you try\n # to use it, either due to strange configuration or broken monkeypatching\n # from libraries like eventlet/greenlet.\n try:\n poll_obj = select.poll()\n poll_obj.poll(0)\n except (AttributeError, OSError):\n return False\n else:\n return True\n\n\ndef wait_for_socket(*args, **kwargs):\n # We delay choosing which implementation to use until the first time we're\n # called. We could do it at import time, but then we might make the wrong\n # decision if someone goes wild with monkeypatching select.poll after\n # we're imported.\n global wait_for_socket\n if _have_working_poll():\n wait_for_socket = poll_wait_for_socket\n elif hasattr(select, \"select\"):\n wait_for_socket = select_wait_for_socket\n else: # Platform-specific: Appengine.\n wait_for_socket = null_wait_for_socket\n return wait_for_socket(*args, **kwargs)\n\n\ndef wait_for_read(sock, timeout=None):\n \"\"\" Waits for reading to be available on a given socket.\n Returns True if the socket is readable, or False if the timeout expired.\n \"\"\"\n return wait_for_socket(sock, read=True, timeout=timeout)\n\n\ndef wait_for_write(sock, timeout=None):\n \"\"\" Waits for writing to be available on a given socket.\n Returns True if the socket is readable, or False if the timeout expired.\n \"\"\"\n return wait_for_socket(sock, write=True, timeout=timeout)\n", "path": "urllib3/util/wait.py"}]} | 3,028 | 181 |
gh_patches_debug_4386 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-2414 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Did you mean to use f-string here?
Did you mean to use f-string here?
_Originally posted by @lonewolf3739 in https://github.com/open-telemetry/opentelemetry-python/pull/2405#discussion_r792096137_
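For illustration, a standalone sketch (the reader name is made up) showing the three variants: the current plain string logs the literal placeholder, an f-string interpolates eagerly, and the lazy `%s` style defers formatting to the logging machinery:
```python
import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("demo")
metric_reader = "PeriodicExportingMetricReader"

# Current code: a plain string, so "{metric_reader}" is printed literally.
logger.warning("MetricReader {metric_reader} failed to shutdown")

# What the review comment asks about: an f-string interpolates immediately.
logger.warning(f"MetricReader {metric_reader} failed to shutdown")

# Lazy interpolation: formatting only happens if the record is emitted.
logger.warning("MetricReader %s failed to shutdown", metric_reader)
```
The lazy `%s` form matches the change in the diff further below.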
</issue>
<code>
[start of opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from atexit import register, unregister
16 from logging import getLogger
17 from threading import Lock
18 from typing import Optional, Sequence
19
20 from opentelemetry._metrics import Meter as APIMeter
21 from opentelemetry._metrics import MeterProvider as APIMeterProvider
22 from opentelemetry._metrics import NoOpMeter
23 from opentelemetry._metrics.instrument import Counter as APICounter
24 from opentelemetry._metrics.instrument import Histogram as APIHistogram
25 from opentelemetry._metrics.instrument import (
26 ObservableCounter as APIObservableCounter,
27 )
28 from opentelemetry._metrics.instrument import (
29 ObservableGauge as APIObservableGauge,
30 )
31 from opentelemetry._metrics.instrument import (
32 ObservableUpDownCounter as APIObservableUpDownCounter,
33 )
34 from opentelemetry._metrics.instrument import UpDownCounter as APIUpDownCounter
35 from opentelemetry.sdk._metrics.instrument import (
36 Counter,
37 Histogram,
38 ObservableCounter,
39 ObservableGauge,
40 ObservableUpDownCounter,
41 UpDownCounter,
42 )
43 from opentelemetry.sdk._metrics.measurement_consumer import (
44 MeasurementConsumer,
45 SynchronousMeasurementConsumer,
46 )
47 from opentelemetry.sdk._metrics.metric_reader import MetricReader
48 from opentelemetry.sdk._metrics.sdk_configuration import SdkConfiguration
49 from opentelemetry.sdk.resources import Resource
50 from opentelemetry.sdk.util.instrumentation import InstrumentationInfo
51
52 _logger = getLogger(__name__)
53
54
55 class Meter(APIMeter):
56 def __init__(
57 self,
58 instrumentation_info: InstrumentationInfo,
59 measurement_consumer: MeasurementConsumer,
60 ):
61 super().__init__(instrumentation_info)
62 self._instrumentation_info = instrumentation_info
63 self._measurement_consumer = measurement_consumer
64
65 def create_counter(self, name, unit=None, description=None) -> APICounter:
66 return Counter(
67 name,
68 self._instrumentation_info,
69 self._measurement_consumer,
70 unit,
71 description,
72 )
73
74 def create_up_down_counter(
75 self, name, unit=None, description=None
76 ) -> APIUpDownCounter:
77 return UpDownCounter(
78 name,
79 self._instrumentation_info,
80 self._measurement_consumer,
81 unit,
82 description,
83 )
84
85 def create_observable_counter(
86 self, name, callback, unit=None, description=None
87 ) -> APIObservableCounter:
88
89 instrument = ObservableCounter(
90 name,
91 self._instrumentation_info,
92 self._measurement_consumer,
93 callback,
94 unit,
95 description,
96 )
97
98 self._measurement_consumer.register_asynchronous_instrument(instrument)
99
100 return instrument
101
102 def create_histogram(
103 self, name, unit=None, description=None
104 ) -> APIHistogram:
105 return Histogram(
106 name,
107 self._instrumentation_info,
108 self._measurement_consumer,
109 unit,
110 description,
111 )
112
113 def create_observable_gauge(
114 self, name, callback, unit=None, description=None
115 ) -> APIObservableGauge:
116
117 instrument = ObservableGauge(
118 name,
119 self._instrumentation_info,
120 self._measurement_consumer,
121 callback,
122 unit,
123 description,
124 )
125
126 self._measurement_consumer.register_asynchronous_instrument(instrument)
127
128 return instrument
129
130 def create_observable_up_down_counter(
131 self, name, callback, unit=None, description=None
132 ) -> APIObservableUpDownCounter:
133
134 instrument = ObservableUpDownCounter(
135 name,
136 self._instrumentation_info,
137 self._measurement_consumer,
138 callback,
139 unit,
140 description,
141 )
142
143 self._measurement_consumer.register_asynchronous_instrument(instrument)
144
145 return instrument
146
147
148 class MeterProvider(APIMeterProvider):
149 """See `opentelemetry._metrics.MeterProvider`."""
150
151 def __init__(
152 self,
153 metric_readers: Sequence[MetricReader] = (),
154 resource: Resource = Resource.create({}),
155 shutdown_on_exit: bool = True,
156 ):
157 self._lock = Lock()
158 self._meter_lock = Lock()
159 self._atexit_handler = None
160 self._sdk_config = SdkConfiguration(
161 resource=resource, metric_readers=metric_readers
162 )
163 self._measurement_consumer = SynchronousMeasurementConsumer(
164 sdk_config=self._sdk_config
165 )
166
167 if shutdown_on_exit:
168 self._atexit_handler = register(self.shutdown)
169
170 self._meters = {}
171 self._metric_readers = metric_readers
172
173 for metric_reader in self._sdk_config.metric_readers:
174 metric_reader._register_measurement_consumer(self)
175
176 self._shutdown = False
177
178 def force_flush(self) -> bool:
179
180 # FIXME implement a timeout
181
182 metric_reader_result = True
183
184 for metric_reader in self._sdk_config.metric_readers:
185 metric_reader_result = (
186 metric_reader_result and metric_reader.force_flush()
187 )
188
189 if not metric_reader_result:
190 _logger.warning("Unable to force flush all metric readers")
191
192 return metric_reader_result
193
194 def shutdown(self):
195 # FIXME implement a timeout
196
197 if self._shutdown:
198 _logger.warning("shutdown can only be called once")
199 return False
200
201 overall_result = True
202
203 for metric_reader in self._sdk_config.metric_readers:
204 metric_reader_result = metric_reader.shutdown()
205
206 if not metric_reader_result:
207 _logger.warning(
208 "MetricReader {metric_reader} failed to shutdown"
209 )
210
211 overall_result = overall_result and metric_reader_result
212
213 self._shutdown = True
214
215 if self._atexit_handler is not None:
216 unregister(self._atexit_handler)
217 self._atexit_handler = None
218
219 return overall_result
220
221 def get_meter(
222 self,
223 name: str,
224 version: Optional[str] = None,
225 schema_url: Optional[str] = None,
226 ) -> Meter:
227
228 if self._shutdown:
229 _logger.warning(
230 "A shutdown `MeterProvider` can not provide a `Meter`"
231 )
232 return NoOpMeter(name, version=version, schema_url=schema_url)
233
234 info = InstrumentationInfo(name, version, schema_url)
235 with self._meter_lock:
236 if not self._meters.get(info):
237 self._meters[info] = Meter(
238 info,
239 self._measurement_consumer,
240 )
241 return self._meters[info]
242
[end of opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py
--- a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py
@@ -205,7 +205,7 @@
if not metric_reader_result:
_logger.warning(
- "MetricReader {metric_reader} failed to shutdown"
+ "MetricReader %s failed to shutdown", metric_reader
)
overall_result = overall_result and metric_reader_result
| {"golden_diff": "diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py\n--- a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py\n+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py\n@@ -205,7 +205,7 @@\n \n if not metric_reader_result:\n _logger.warning(\n- \"MetricReader {metric_reader} failed to shutdown\"\n+ \"MetricReader %s failed to shutdown\", metric_reader\n )\n \n overall_result = overall_result and metric_reader_result\n", "issue": "Did you mean to use f-string here?\nDid you mean to use f-string here?\r\n\r\n_Originally posted by @lonewolf3739 in https://github.com/open-telemetry/opentelemetry-python/pull/2405#discussion_r792096137_\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom atexit import register, unregister\nfrom logging import getLogger\nfrom threading import Lock\nfrom typing import Optional, Sequence\n\nfrom opentelemetry._metrics import Meter as APIMeter\nfrom opentelemetry._metrics import MeterProvider as APIMeterProvider\nfrom opentelemetry._metrics import NoOpMeter\nfrom opentelemetry._metrics.instrument import Counter as APICounter\nfrom opentelemetry._metrics.instrument import Histogram as APIHistogram\nfrom opentelemetry._metrics.instrument import (\n ObservableCounter as APIObservableCounter,\n)\nfrom opentelemetry._metrics.instrument import (\n ObservableGauge as APIObservableGauge,\n)\nfrom opentelemetry._metrics.instrument import (\n ObservableUpDownCounter as APIObservableUpDownCounter,\n)\nfrom opentelemetry._metrics.instrument import UpDownCounter as APIUpDownCounter\nfrom opentelemetry.sdk._metrics.instrument import (\n Counter,\n Histogram,\n ObservableCounter,\n ObservableGauge,\n ObservableUpDownCounter,\n UpDownCounter,\n)\nfrom opentelemetry.sdk._metrics.measurement_consumer import (\n MeasurementConsumer,\n SynchronousMeasurementConsumer,\n)\nfrom opentelemetry.sdk._metrics.metric_reader import MetricReader\nfrom opentelemetry.sdk._metrics.sdk_configuration import SdkConfiguration\nfrom opentelemetry.sdk.resources import Resource\nfrom opentelemetry.sdk.util.instrumentation import InstrumentationInfo\n\n_logger = getLogger(__name__)\n\n\nclass Meter(APIMeter):\n def __init__(\n self,\n instrumentation_info: InstrumentationInfo,\n measurement_consumer: MeasurementConsumer,\n ):\n super().__init__(instrumentation_info)\n self._instrumentation_info = instrumentation_info\n self._measurement_consumer = measurement_consumer\n\n def create_counter(self, name, unit=None, description=None) -> APICounter:\n return Counter(\n name,\n self._instrumentation_info,\n self._measurement_consumer,\n unit,\n description,\n )\n\n def create_up_down_counter(\n self, name, unit=None, description=None\n ) -> APIUpDownCounter:\n return UpDownCounter(\n name,\n self._instrumentation_info,\n self._measurement_consumer,\n unit,\n description,\n )\n\n def create_observable_counter(\n 
self, name, callback, unit=None, description=None\n ) -> APIObservableCounter:\n\n instrument = ObservableCounter(\n name,\n self._instrumentation_info,\n self._measurement_consumer,\n callback,\n unit,\n description,\n )\n\n self._measurement_consumer.register_asynchronous_instrument(instrument)\n\n return instrument\n\n def create_histogram(\n self, name, unit=None, description=None\n ) -> APIHistogram:\n return Histogram(\n name,\n self._instrumentation_info,\n self._measurement_consumer,\n unit,\n description,\n )\n\n def create_observable_gauge(\n self, name, callback, unit=None, description=None\n ) -> APIObservableGauge:\n\n instrument = ObservableGauge(\n name,\n self._instrumentation_info,\n self._measurement_consumer,\n callback,\n unit,\n description,\n )\n\n self._measurement_consumer.register_asynchronous_instrument(instrument)\n\n return instrument\n\n def create_observable_up_down_counter(\n self, name, callback, unit=None, description=None\n ) -> APIObservableUpDownCounter:\n\n instrument = ObservableUpDownCounter(\n name,\n self._instrumentation_info,\n self._measurement_consumer,\n callback,\n unit,\n description,\n )\n\n self._measurement_consumer.register_asynchronous_instrument(instrument)\n\n return instrument\n\n\nclass MeterProvider(APIMeterProvider):\n \"\"\"See `opentelemetry._metrics.MeterProvider`.\"\"\"\n\n def __init__(\n self,\n metric_readers: Sequence[MetricReader] = (),\n resource: Resource = Resource.create({}),\n shutdown_on_exit: bool = True,\n ):\n self._lock = Lock()\n self._meter_lock = Lock()\n self._atexit_handler = None\n self._sdk_config = SdkConfiguration(\n resource=resource, metric_readers=metric_readers\n )\n self._measurement_consumer = SynchronousMeasurementConsumer(\n sdk_config=self._sdk_config\n )\n\n if shutdown_on_exit:\n self._atexit_handler = register(self.shutdown)\n\n self._meters = {}\n self._metric_readers = metric_readers\n\n for metric_reader in self._sdk_config.metric_readers:\n metric_reader._register_measurement_consumer(self)\n\n self._shutdown = False\n\n def force_flush(self) -> bool:\n\n # FIXME implement a timeout\n\n metric_reader_result = True\n\n for metric_reader in self._sdk_config.metric_readers:\n metric_reader_result = (\n metric_reader_result and metric_reader.force_flush()\n )\n\n if not metric_reader_result:\n _logger.warning(\"Unable to force flush all metric readers\")\n\n return metric_reader_result\n\n def shutdown(self):\n # FIXME implement a timeout\n\n if self._shutdown:\n _logger.warning(\"shutdown can only be called once\")\n return False\n\n overall_result = True\n\n for metric_reader in self._sdk_config.metric_readers:\n metric_reader_result = metric_reader.shutdown()\n\n if not metric_reader_result:\n _logger.warning(\n \"MetricReader {metric_reader} failed to shutdown\"\n )\n\n overall_result = overall_result and metric_reader_result\n\n self._shutdown = True\n\n if self._atexit_handler is not None:\n unregister(self._atexit_handler)\n self._atexit_handler = None\n\n return overall_result\n\n def get_meter(\n self,\n name: str,\n version: Optional[str] = None,\n schema_url: Optional[str] = None,\n ) -> Meter:\n\n if self._shutdown:\n _logger.warning(\n \"A shutdown `MeterProvider` can not provide a `Meter`\"\n )\n return NoOpMeter(name, version=version, schema_url=schema_url)\n\n info = InstrumentationInfo(name, version, schema_url)\n with self._meter_lock:\n if not self._meters.get(info):\n self._meters[info] = Meter(\n info,\n self._measurement_consumer,\n )\n return self._meters[info]\n", 
"path": "opentelemetry-sdk/src/opentelemetry/sdk/_metrics/__init__.py"}]} | 2,696 | 140 |
gh_patches_debug_33490 | rasdani/github-patches | git_diff | apache__airflow-22536 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PostgresToGoogleCloudStorageOperator - Custom schema mapping
Version : 1.10.12
I used PostgresToGoogleCloudStorageOperator to export the data and the schema file as well. One column in Postgres was `TIMESTAMP without time zone`, but when the table was auto-created in BigQuery (via `GoogleCloudStorageToBigQueryOperator`) from that JSON schema file, its data type came out as `TIMESTAMP`.
For data without a timezone, **`DATETIME`** would be the right choice. So can we manually map the data types during the schema file export?
</issue>
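For orientation, the golden diff further down this row resolves the report by refining the operator's OID-to-BigQuery type map (Postgres type OIDs: 1114 is timestamp without time zone, 1184 is timestamp with time zone, 1082 is date, 1083 is time). A minimal sketch of that mapping idea; the dictionary and helper names here are illustrative, not the operator's actual attributes:

```python
# Illustrative sketch of an OID -> BigQuery type map that keeps
# timezone-naive and timezone-aware timestamps distinct.
PG_OID_TO_BQ_TYPE = {
    1114: "DATETIME",   # timestamp without time zone
    1184: "TIMESTAMP",  # timestamp with time zone
    1082: "DATE",
    1083: "TIME",
}

def field_to_bq_schema(field):
    """Map one psycopg2 cursor.description entry (name, type_code, ...) to a BQ schema field."""
    name, type_code = field[0], field[1]
    return {
        "name": name,
        "type": PG_OID_TO_BQ_TYPE.get(type_code, "STRING"),
        "mode": "NULLABLE",
    }
```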
<code>
[start of airflow/providers/google/cloud/transfers/postgres_to_gcs.py]
1 #
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18 """PostgreSQL to GCS operator."""
19
20 import datetime
21 import json
22 import time
23 import uuid
24 from decimal import Decimal
25 from typing import Dict
26
27 import pendulum
28
29 from airflow.providers.google.cloud.transfers.sql_to_gcs import BaseSQLToGCSOperator
30 from airflow.providers.postgres.hooks.postgres import PostgresHook
31
32
33 class _PostgresServerSideCursorDecorator:
34 """
35 Inspired by `_PrestoToGCSPrestoCursorAdapter` to keep this consistent.
36
37 Decorator for allowing description to be available for postgres cursor in case server side
38 cursor is used. It doesn't provide other methods except those needed in BaseSQLToGCSOperator,
39 which is more of a safety feature.
40 """
41
42 def __init__(self, cursor):
43 self.cursor = cursor
44 self.rows = []
45 self.initialized = False
46
47 def __iter__(self):
48 return self
49
50 def __next__(self):
51 if self.rows:
52 return self.rows.pop()
53 else:
54 self.initialized = True
55 return next(self.cursor)
56
57 @property
58 def description(self):
59 """Fetch first row to initialize cursor description when using server side cursor."""
60 if not self.initialized:
61 element = self.cursor.fetchone()
62 if element is not None:
63 self.rows.append(element)
64 self.initialized = True
65 return self.cursor.description
66
67
68 class PostgresToGCSOperator(BaseSQLToGCSOperator):
69 """
70 Copy data from Postgres to Google Cloud Storage in JSON or CSV format.
71
72 :param postgres_conn_id: Reference to a specific Postgres hook.
73 :param use_server_side_cursor: If server-side cursor should be used for querying postgres.
74 For detailed info, check https://www.psycopg.org/docs/usage.html#server-side-cursors
75 :param cursor_itersize: How many records are fetched at a time in case of server-side cursor.
76 """
77
78 ui_color = '#a0e08c'
79
80 type_map = {
81 1114: 'TIMESTAMP',
82 1184: 'TIMESTAMP',
83 1082: 'TIMESTAMP',
84 1083: 'TIMESTAMP',
85 1005: 'INTEGER',
86 1007: 'INTEGER',
87 1016: 'INTEGER',
88 20: 'INTEGER',
89 21: 'INTEGER',
90 23: 'INTEGER',
91 16: 'BOOLEAN',
92 700: 'FLOAT',
93 701: 'FLOAT',
94 1700: 'FLOAT',
95 }
96
97 def __init__(
98 self,
99 *,
100 postgres_conn_id='postgres_default',
101 use_server_side_cursor=False,
102 cursor_itersize=2000,
103 **kwargs,
104 ):
105 super().__init__(**kwargs)
106 self.postgres_conn_id = postgres_conn_id
107 self.use_server_side_cursor = use_server_side_cursor
108 self.cursor_itersize = cursor_itersize
109
110 def _unique_name(self):
111 return f"{self.dag_id}__{self.task_id}__{uuid.uuid4()}" if self.use_server_side_cursor else None
112
113 def query(self):
114 """Queries Postgres and returns a cursor to the results."""
115 hook = PostgresHook(postgres_conn_id=self.postgres_conn_id)
116 conn = hook.get_conn()
117 cursor = conn.cursor(name=self._unique_name())
118 cursor.execute(self.sql, self.parameters)
119 if self.use_server_side_cursor:
120 cursor.itersize = self.cursor_itersize
121 return _PostgresServerSideCursorDecorator(cursor)
122 return cursor
123
124 def field_to_bigquery(self, field) -> Dict[str, str]:
125 return {
126 'name': field[0],
127 'type': self.type_map.get(field[1], "STRING"),
128 'mode': 'REPEATED' if field[1] in (1009, 1005, 1007, 1016) else 'NULLABLE',
129 }
130
131 def convert_type(self, value, schema_type):
132 """
133 Takes a value from Postgres, and converts it to a value that's safe for
134 JSON/Google Cloud Storage/BigQuery. Dates are converted to UTC seconds.
135 Decimals are converted to floats. Times are converted to seconds.
136 """
137 if isinstance(value, (datetime.datetime, datetime.date)):
138 return pendulum.parse(value.isoformat()).float_timestamp
139 if isinstance(value, datetime.time):
140 formatted_time = time.strptime(str(value), "%H:%M:%S")
141 return int(
142 datetime.timedelta(
143 hours=formatted_time.tm_hour, minutes=formatted_time.tm_min, seconds=formatted_time.tm_sec
144 ).total_seconds()
145 )
146 if isinstance(value, dict):
147 return json.dumps(value)
148 if isinstance(value, Decimal):
149 return float(value)
150 return value
151
[end of airflow/providers/google/cloud/transfers/postgres_to_gcs.py]
</code>
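The golden diff later in this row also reworks `convert_type` so that timezone-aware datetimes stay as UTC epoch seconds while naive datetimes, dates and times become ISO-style strings. A rough standalone sketch of that distinction (not the exact code from the diff; the helper name is illustrative):

```python
import datetime

def convert_temporal(value):
    # Rough sketch of the aware/naive split drawn in the golden diff for this row.
    if isinstance(value, datetime.datetime):
        if value.tzinfo is None:
            return value.isoformat()   # naive -> string suitable for a DATETIME column
        return value.timestamp()       # aware -> epoch seconds (UTC)
    if isinstance(value, datetime.date):
        return value.isoformat()       # -> DATE string
    if isinstance(value, datetime.time):
        return value.isoformat()       # -> TIME string
    return value
```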
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/airflow/providers/google/cloud/transfers/postgres_to_gcs.py b/airflow/providers/google/cloud/transfers/postgres_to_gcs.py
--- a/airflow/providers/google/cloud/transfers/postgres_to_gcs.py
+++ b/airflow/providers/google/cloud/transfers/postgres_to_gcs.py
@@ -78,10 +78,10 @@
ui_color = '#a0e08c'
type_map = {
- 1114: 'TIMESTAMP',
+ 1114: 'DATETIME',
1184: 'TIMESTAMP',
- 1082: 'TIMESTAMP',
- 1083: 'TIMESTAMP',
+ 1082: 'DATE',
+ 1083: 'TIME',
1005: 'INTEGER',
1007: 'INTEGER',
1016: 'INTEGER',
@@ -131,18 +131,24 @@
def convert_type(self, value, schema_type):
"""
Takes a value from Postgres, and converts it to a value that's safe for
- JSON/Google Cloud Storage/BigQuery. Dates are converted to UTC seconds.
- Decimals are converted to floats. Times are converted to seconds.
+ JSON/Google Cloud Storage/BigQuery.
+ Timezone aware Datetime are converted to UTC seconds.
+ Unaware Datetime, Date and Time are converted to ISO formatted strings.
+ Decimals are converted to floats.
"""
- if isinstance(value, (datetime.datetime, datetime.date)):
- return pendulum.parse(value.isoformat()).float_timestamp
+ if isinstance(value, datetime.datetime):
+ iso_format_value = value.isoformat()
+ if value.tzinfo is None:
+ return iso_format_value
+ return pendulum.parse(iso_format_value).float_timestamp
+ if isinstance(value, datetime.date):
+ return value.isoformat()
if isinstance(value, datetime.time):
formatted_time = time.strptime(str(value), "%H:%M:%S")
- return int(
- datetime.timedelta(
- hours=formatted_time.tm_hour, minutes=formatted_time.tm_min, seconds=formatted_time.tm_sec
- ).total_seconds()
+ time_delta = datetime.timedelta(
+ hours=formatted_time.tm_hour, minutes=formatted_time.tm_min, seconds=formatted_time.tm_sec
)
+ return str(time_delta)
if isinstance(value, dict):
return json.dumps(value)
if isinstance(value, Decimal):
| {"golden_diff": "diff --git a/airflow/providers/google/cloud/transfers/postgres_to_gcs.py b/airflow/providers/google/cloud/transfers/postgres_to_gcs.py\n--- a/airflow/providers/google/cloud/transfers/postgres_to_gcs.py\n+++ b/airflow/providers/google/cloud/transfers/postgres_to_gcs.py\n@@ -78,10 +78,10 @@\n ui_color = '#a0e08c'\n \n type_map = {\n- 1114: 'TIMESTAMP',\n+ 1114: 'DATETIME',\n 1184: 'TIMESTAMP',\n- 1082: 'TIMESTAMP',\n- 1083: 'TIMESTAMP',\n+ 1082: 'DATE',\n+ 1083: 'TIME',\n 1005: 'INTEGER',\n 1007: 'INTEGER',\n 1016: 'INTEGER',\n@@ -131,18 +131,24 @@\n def convert_type(self, value, schema_type):\n \"\"\"\n Takes a value from Postgres, and converts it to a value that's safe for\n- JSON/Google Cloud Storage/BigQuery. Dates are converted to UTC seconds.\n- Decimals are converted to floats. Times are converted to seconds.\n+ JSON/Google Cloud Storage/BigQuery.\n+ Timezone aware Datetime are converted to UTC seconds.\n+ Unaware Datetime, Date and Time are converted to ISO formatted strings.\n+ Decimals are converted to floats.\n \"\"\"\n- if isinstance(value, (datetime.datetime, datetime.date)):\n- return pendulum.parse(value.isoformat()).float_timestamp\n+ if isinstance(value, datetime.datetime):\n+ iso_format_value = value.isoformat()\n+ if value.tzinfo is None:\n+ return iso_format_value\n+ return pendulum.parse(iso_format_value).float_timestamp\n+ if isinstance(value, datetime.date):\n+ return value.isoformat()\n if isinstance(value, datetime.time):\n formatted_time = time.strptime(str(value), \"%H:%M:%S\")\n- return int(\n- datetime.timedelta(\n- hours=formatted_time.tm_hour, minutes=formatted_time.tm_min, seconds=formatted_time.tm_sec\n- ).total_seconds()\n+ time_delta = datetime.timedelta(\n+ hours=formatted_time.tm_hour, minutes=formatted_time.tm_min, seconds=formatted_time.tm_sec\n )\n+ return str(time_delta)\n if isinstance(value, dict):\n return json.dumps(value)\n if isinstance(value, Decimal):\n", "issue": "PostgresToGoogleCloudStorageOperator - Custom schema mapping\nVersion : 1.10.12\r\n\r\nI used PostgresToGoogleCloudStorageOperator to export the data and the schema file as well. But I saw a column on Postgres was `TIMESTAMP without time zone` but in BigQuery the auto-create table (via `GoogleCloudStorageToBigQueryOperator`) used the JSON schema file and created the table. When I checked the BQ table the data type was `TIMESTAMP`.\r\n\r\nFor without timezone data, **`DATETIME`** would be the right choice. So can we manually MAP the data types during the schema file export? \n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"PostgreSQL to GCS operator.\"\"\"\n\nimport datetime\nimport json\nimport time\nimport uuid\nfrom decimal import Decimal\nfrom typing import Dict\n\nimport pendulum\n\nfrom airflow.providers.google.cloud.transfers.sql_to_gcs import BaseSQLToGCSOperator\nfrom airflow.providers.postgres.hooks.postgres import PostgresHook\n\n\nclass _PostgresServerSideCursorDecorator:\n \"\"\"\n Inspired by `_PrestoToGCSPrestoCursorAdapter` to keep this consistent.\n\n Decorator for allowing description to be available for postgres cursor in case server side\n cursor is used. It doesn't provide other methods except those needed in BaseSQLToGCSOperator,\n which is more of a safety feature.\n \"\"\"\n\n def __init__(self, cursor):\n self.cursor = cursor\n self.rows = []\n self.initialized = False\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.rows:\n return self.rows.pop()\n else:\n self.initialized = True\n return next(self.cursor)\n\n @property\n def description(self):\n \"\"\"Fetch first row to initialize cursor description when using server side cursor.\"\"\"\n if not self.initialized:\n element = self.cursor.fetchone()\n if element is not None:\n self.rows.append(element)\n self.initialized = True\n return self.cursor.description\n\n\nclass PostgresToGCSOperator(BaseSQLToGCSOperator):\n \"\"\"\n Copy data from Postgres to Google Cloud Storage in JSON or CSV format.\n\n :param postgres_conn_id: Reference to a specific Postgres hook.\n :param use_server_side_cursor: If server-side cursor should be used for querying postgres.\n For detailed info, check https://www.psycopg.org/docs/usage.html#server-side-cursors\n :param cursor_itersize: How many records are fetched at a time in case of server-side cursor.\n \"\"\"\n\n ui_color = '#a0e08c'\n\n type_map = {\n 1114: 'TIMESTAMP',\n 1184: 'TIMESTAMP',\n 1082: 'TIMESTAMP',\n 1083: 'TIMESTAMP',\n 1005: 'INTEGER',\n 1007: 'INTEGER',\n 1016: 'INTEGER',\n 20: 'INTEGER',\n 21: 'INTEGER',\n 23: 'INTEGER',\n 16: 'BOOLEAN',\n 700: 'FLOAT',\n 701: 'FLOAT',\n 1700: 'FLOAT',\n }\n\n def __init__(\n self,\n *,\n postgres_conn_id='postgres_default',\n use_server_side_cursor=False,\n cursor_itersize=2000,\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.postgres_conn_id = postgres_conn_id\n self.use_server_side_cursor = use_server_side_cursor\n self.cursor_itersize = cursor_itersize\n\n def _unique_name(self):\n return f\"{self.dag_id}__{self.task_id}__{uuid.uuid4()}\" if self.use_server_side_cursor else None\n\n def query(self):\n \"\"\"Queries Postgres and returns a cursor to the results.\"\"\"\n hook = PostgresHook(postgres_conn_id=self.postgres_conn_id)\n conn = hook.get_conn()\n cursor = conn.cursor(name=self._unique_name())\n cursor.execute(self.sql, self.parameters)\n if self.use_server_side_cursor:\n cursor.itersize = self.cursor_itersize\n return _PostgresServerSideCursorDecorator(cursor)\n return cursor\n\n def field_to_bigquery(self, field) -> Dict[str, str]:\n return {\n 'name': field[0],\n 'type': self.type_map.get(field[1], \"STRING\"),\n 'mode': 'REPEATED' if field[1] in (1009, 1005, 1007, 1016) else 'NULLABLE',\n }\n\n def convert_type(self, value, schema_type):\n \"\"\"\n Takes a value from Postgres, and converts it to a value that's safe for\n JSON/Google Cloud Storage/BigQuery. Dates are converted to UTC seconds.\n Decimals are converted to floats. 
Times are converted to seconds.\n \"\"\"\n if isinstance(value, (datetime.datetime, datetime.date)):\n return pendulum.parse(value.isoformat()).float_timestamp\n if isinstance(value, datetime.time):\n formatted_time = time.strptime(str(value), \"%H:%M:%S\")\n return int(\n datetime.timedelta(\n hours=formatted_time.tm_hour, minutes=formatted_time.tm_min, seconds=formatted_time.tm_sec\n ).total_seconds()\n )\n if isinstance(value, dict):\n return json.dumps(value)\n if isinstance(value, Decimal):\n return float(value)\n return value\n", "path": "airflow/providers/google/cloud/transfers/postgres_to_gcs.py"}]} | 2,256 | 559 |
gh_patches_debug_21113 | rasdani/github-patches | git_diff | sktime__sktime-6183 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Unusual if statement in _lower_bounding_numba.py
**Describe the bug**
<!--
A clear and concise description of what the bug is.
--> If statement with same code in both branches
**To Reproduce**
<!--
Add a Minimal, Complete, and Verifiable example (for more details, see e.g. https://stackoverflow.com/help/mcve
If the code is too long, feel free to put it in a public gist and link it in the issue: https://gist.github.com
--> See def create_shape_on_matrix, specifically lines 63-68
**Expected behavior**
<!--
A clear and concise description of what you expected to happen.
--> In the else statement, I would expect ceil and floor to be exchanged
**Versions**
0.27.0
<!--
Please run the following code snippet and paste the output here:
from sktime import show_versions; show_versions()
-->
System:
python: 3.12.0 (v3.12.0:0fb18b02c8, Oct 2 2023, 09:45:56) [Clang 13.0.0 (clang-1300.0.29.30)]
executable: /path
machine: macOS-14.4-arm64-arm-64bit
Python dependencies:
pip: 24.0
sktime: 0.27.0
sklearn: 1.4.1.post1
skbase: 0.7.5
numpy: 1.26.4
scipy: 1.12.0
pandas: 2.1.4
matplotlib: 3.8.3
joblib: 1.3.2
numba: 0.59.0
statsmodels: None
pmdarima: None
statsforecast: None
tsfresh: None
tslearn: None
torch: None
tensorflow: None
tensorflow_probability: None
Backend MacOSX is interactive backend. Turning interactive mode on.
</details>
<!-- Thanks for contributing! -->
</issue>
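Worth noting: the golden diff later in this row does not swap `ceil` and `floor`. Because both branches compute exactly the same thing, it removes the `i > half_way` conditional (and the now-unused `half_way`) altogether. A standalone sketch of the collapsed per-column computation, with an illustrative clamping helper:

```python
import math

def _clamp(value, low, high):
    return max(low, min(high, value))

def column_bounds(y_upper_line, y_lower_line, y_size):
    # The single expression both branches of the original if/else shared.
    bounds = []
    for i in range(len(y_upper_line)):
        upper_y = _clamp(math.ceil(y_upper_line[i]), 0, y_size - 1)
        lower_y = _clamp(math.floor(y_lower_line[i]), 0, y_size - 1)
        bounds.append((upper_y, lower_y))
    return bounds
```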
<code>
[start of sktime/distances/_lower_bounding_numba.py]
1 """Isolated numba imports for lower_bounding."""
2
3 __author__ = ["chrisholder", "TonyBagnall"]
4
5 import math
6 from typing import Union
7
8 import numpy as np
9
10 from sktime.utils.numba.njit import njit
11
12
13 @njit(cache=True)
14 def create_shape_on_matrix(
15 bounding_matrix: np.ndarray,
16 y_upper_line: np.ndarray,
17 y_lower_line: Union[np.ndarray, None] = None,
18 x_step_size: int = 1,
19 start_val: int = 0,
20 ) -> np.ndarray:
21 """Create a shape from a given upper line and lower line on a matrix.
22
23 Parameters
24 ----------
25 bounding_matrix: np.ndarray (2d array)
26 Matrix of size mxn where m is len(x) and n is len(y). Values that
27 are inside the shape will be replaced with finite values (0.).
28 y_upper_line: np.ndarray (1d array)
29 Y points of the upper line.
30 y_lower_line: np.ndarray (1d array), defaults = None
31 Y points of the lower line. If no lower line specified, then y_upper_line
32 used as lower line.
33 x_step_size: int, defaults = 1
34 Step size each iteration will increase by
35 start_val: int, defaults = 0
36 Starting coordinate for x
37
38 Returns
39 -------
40 np.ndarray (2d array)
41 Matrix with values of the shape set to 0. (finite), of the same shape
42 as the passed bounding_matrix.
43 """
44 y_size = bounding_matrix.shape[0]
45
46 if y_lower_line is None:
47 y_lower_line = y_upper_line
48
49 upper_line_y_values = y_upper_line.shape[0]
50 lower_line_y_values = y_lower_line.shape[0]
51
52 if upper_line_y_values != lower_line_y_values:
53 raise ValueError(
54 "The number of upper line values must equal the number of lower line "
55 "values"
56 )
57
58 half_way = math.floor(upper_line_y_values / 2)
59
60 for i in range(start_val, upper_line_y_values):
61 x = i * x_step_size
62
63 if i > half_way:
64 upper_y = max(0, min(y_size - 1, math.ceil(y_upper_line[i])))
65 lower_y = max(0, min(y_size - 1, math.floor(y_lower_line[i])))
66 else:
67 upper_y = max(0, min(y_size - 1, math.ceil(y_upper_line[i])))
68 lower_y = max(0, min(y_size - 1, math.floor(y_lower_line[i])))
69
70 if upper_line_y_values == lower_line_y_values:
71 if upper_y == lower_y:
72 bounding_matrix[upper_y, x] = 0.0
73 else:
74 bounding_matrix[upper_y : (lower_y + 1), x] = 0.0
75 else:
76 bounding_matrix[upper_y, x] = 0.0
77 bounding_matrix[lower_y, x] = 0.0
78
79 return bounding_matrix
80
81
82 @njit(cache=True)
83 def _check_line_steps(line: np.ndarray) -> np.ndarray:
84 """Check the next 'step' is along the line.
85
86 Parameters
87 ----------
88 line: np.ndarray
89 line to check steps.
90
91 Returns
92 -------
93 np.ndarray
94 Line with updated indexes.
95 """
96 prev = line[0]
97 for i in range(1, len(line)):
98 curr_val = line[i]
99 if curr_val > (prev + 1):
100 line[i] = prev + 1
101 elif curr_val < (prev - 1):
102 line[i] = prev - 1
103 prev = curr_val
104 return line
105
106
107 @njit(cache=True)
108 def no_bounding(x: np.ndarray, y: np.ndarray) -> np.ndarray:
109 """Create a matrix with no bounding.
110
111 Parameters
112 ----------
113 x: np.ndarray (2d array)
114 First time series.
115 y: np.ndarray (2d array)
116 Second time series.
117
118 Returns
119 -------
120 np.ndarray (2d of size mxn where m is len(x) and n is len(y)).
121 Bounding matrix where the values inside the bound are finite values (0s) and
122 outside the bounds are infinity (non finite).
123 """
124 return np.zeros((x.shape[1], y.shape[1]))
125
126
127 @njit(cache=True)
128 def sakoe_chiba(x: np.ndarray, y: np.ndarray, window: float) -> np.ndarray:
129 """Create a sakoe chiba lower bounding window on a matrix.
130
131 Parameters
132 ----------
133 x: np.ndarray (2d array)
134 First time series.
135 y: np.ndarray (2d array)
136 Second time series.
137 window: float
138 Float that is the size of the window. Must be between 0 and 1.
139
140 Returns
141 -------
142 np.ndarray (2d of size mxn where m is len(x) and n is len(y)).
143 Sakoe Chiba bounding matrix where the values inside the bound are finite
144 values (0s) and outside the bounds are infinity (non finite).
145
146 Raises
147 ------
148 ValueError
149 If the sakoe_chiba_window_radius is not an integer.
150 """
151 if window < 0 or window > 1:
152 raise ValueError("Window must between 0 and 1")
153
154 x_size = x.shape[1]
155 y_size = y.shape[1]
156 bounding_matrix = np.full((x_size, y_size), np.inf)
157 sakoe_chiba_window_radius = ((x_size / 100) * window) * 100
158
159 x_upper_line_values = np.interp(
160 list(range(x_size)),
161 [0, x_size - 1],
162 [0 - sakoe_chiba_window_radius, y_size - sakoe_chiba_window_radius - 1],
163 )
164 x_lower_line_values = np.interp(
165 list(range(x_size)),
166 [0, x_size - 1],
167 [0 + sakoe_chiba_window_radius, y_size + sakoe_chiba_window_radius - 1],
168 )
169
170 bounding_matrix = create_shape_on_matrix(
171 bounding_matrix, x_upper_line_values, x_lower_line_values
172 )
173
174 return bounding_matrix
175
176
177 @njit(cache=True)
178 def itakura_parallelogram(
179 x: np.ndarray, y: np.ndarray, itakura_max_slope: float
180 ) -> np.ndarray:
181 """Create a itakura parallelogram bounding matrix.
182
183 Parameters
184 ----------
185 x: np.ndarray (2d array)
186 First time series.
187 y: np.ndarray (2d array)
188 Second time series.
189 itakura_max_slope: float or int
190 Gradient of the slope must be between 0 and 1.
191
192 Returns
193 -------
194 np.ndarray (2d of size mxn where m is len(x) and n is len(y)).
195 Sakoe Chiba bounding matrix where the values inside the bound are finite
196 values (0s) and outside the bounds are infinity (non finite).
197
198 Raises
199 ------
200 ValueError
201 If the itakura_max_slope is not a float or int.
202 """
203 if itakura_max_slope < 0 or itakura_max_slope > 1:
204 raise ValueError("Window must between 0 and 1")
205 x_size = x.shape[1]
206 y_size = y.shape[1]
207 bounding_matrix = np.full((y_size, x_size), np.inf)
208 itakura_max_slope = math.floor(((x_size / 100) * itakura_max_slope) * 100) / 2
209
210 middle_x_upper = math.ceil(x_size / 2)
211 middle_x_lower = math.floor(x_size / 2)
212 if middle_x_lower == middle_x_upper:
213 middle_x_lower = middle_x_lower - 1
214 middle_y = math.floor(y_size / 2)
215
216 difference_from_middle_y = abs((middle_x_lower * itakura_max_slope) - middle_y)
217 middle_y_lower = middle_y + difference_from_middle_y
218 middle_y_upper = middle_y - difference_from_middle_y
219
220 x_upper_line_values = np.interp(
221 list(range(x_size)),
222 [0, middle_x_lower, middle_x_upper, x_size - 1],
223 [0, middle_y_upper, middle_y_upper, y_size - 1],
224 )
225 x_lower_line_values = np.interp(
226 list(range(x_size)),
227 [0, middle_x_lower, middle_x_upper, x_size - 1],
228 [0, middle_y_lower, middle_y_lower, y_size - 1],
229 )
230
231 if np.array_equal(x_upper_line_values, x_lower_line_values):
232 x_upper_line_values = _check_line_steps(x_upper_line_values)
233
234 bounding_matrix = create_shape_on_matrix(
235 bounding_matrix, x_upper_line_values, x_lower_line_values
236 )
237
238 return bounding_matrix
239
240
241 @njit(cache=True)
242 def numba_create_bounding_matrix(
243 x: np.ndarray,
244 y: np.ndarray,
245 window: float = -1.0,
246 itakura_max_slope: float = -1.0,
247 ) -> np.ndarray:
248 """Numba compiled way of creating bounding matrix.
249
250 Parameters
251 ----------
252 x: np.ndarray (2d array)
253 First time series.
254 y: np.ndarray (2d array)
255 Second time series.
256 window: float, defaults = -1.
257 Float that is the % radius of the sakoe chiba window (if using Sakoe-Chiba
258 lower bounding). Must be between 0 and 1.
259 itakura_max_slope: float, defaults = -1.
260 Gradient of the slope for itakura parallelogram (if using Itakura
261 Parallelogram lower bounding). Must be between 0 and 1.
262 """
263 if window != -1.0:
264 bounding_matrix = sakoe_chiba(x, y, window)
265 elif itakura_max_slope != -1.0:
266 bounding_matrix = itakura_parallelogram(x, y, itakura_max_slope)
267 else:
268 bounding_matrix = no_bounding(x, y)
269
270 return bounding_matrix
271
[end of sktime/distances/_lower_bounding_numba.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sktime/distances/_lower_bounding_numba.py b/sktime/distances/_lower_bounding_numba.py
--- a/sktime/distances/_lower_bounding_numba.py
+++ b/sktime/distances/_lower_bounding_numba.py
@@ -55,26 +55,16 @@
"values"
)
- half_way = math.floor(upper_line_y_values / 2)
-
for i in range(start_val, upper_line_y_values):
x = i * x_step_size
- if i > half_way:
- upper_y = max(0, min(y_size - 1, math.ceil(y_upper_line[i])))
- lower_y = max(0, min(y_size - 1, math.floor(y_lower_line[i])))
- else:
- upper_y = max(0, min(y_size - 1, math.ceil(y_upper_line[i])))
- lower_y = max(0, min(y_size - 1, math.floor(y_lower_line[i])))
-
- if upper_line_y_values == lower_line_y_values:
- if upper_y == lower_y:
- bounding_matrix[upper_y, x] = 0.0
- else:
- bounding_matrix[upper_y : (lower_y + 1), x] = 0.0
- else:
+ upper_y = max(0, min(y_size - 1, math.ceil(y_upper_line[i])))
+ lower_y = max(0, min(y_size - 1, math.floor(y_lower_line[i])))
+
+ if upper_y == lower_y:
bounding_matrix[upper_y, x] = 0.0
- bounding_matrix[lower_y, x] = 0.0
+ else:
+ bounding_matrix[upper_y : (lower_y + 1), x] = 0.0
return bounding_matrix
| {"golden_diff": "diff --git a/sktime/distances/_lower_bounding_numba.py b/sktime/distances/_lower_bounding_numba.py\n--- a/sktime/distances/_lower_bounding_numba.py\n+++ b/sktime/distances/_lower_bounding_numba.py\n@@ -55,26 +55,16 @@\n \"values\"\n )\n \n- half_way = math.floor(upper_line_y_values / 2)\n-\n for i in range(start_val, upper_line_y_values):\n x = i * x_step_size\n \n- if i > half_way:\n- upper_y = max(0, min(y_size - 1, math.ceil(y_upper_line[i])))\n- lower_y = max(0, min(y_size - 1, math.floor(y_lower_line[i])))\n- else:\n- upper_y = max(0, min(y_size - 1, math.ceil(y_upper_line[i])))\n- lower_y = max(0, min(y_size - 1, math.floor(y_lower_line[i])))\n-\n- if upper_line_y_values == lower_line_y_values:\n- if upper_y == lower_y:\n- bounding_matrix[upper_y, x] = 0.0\n- else:\n- bounding_matrix[upper_y : (lower_y + 1), x] = 0.0\n- else:\n+ upper_y = max(0, min(y_size - 1, math.ceil(y_upper_line[i])))\n+ lower_y = max(0, min(y_size - 1, math.floor(y_lower_line[i])))\n+\n+ if upper_y == lower_y:\n bounding_matrix[upper_y, x] = 0.0\n- bounding_matrix[lower_y, x] = 0.0\n+ else:\n+ bounding_matrix[upper_y : (lower_y + 1), x] = 0.0\n \n return bounding_matrix\n", "issue": "[BUG] Unusual if statement in _lower_bounding_numba.py\n**Describe the bug**\r\n<!--\r\nA clear and concise description of what the bug is.\r\n--> If statement with same code in both branches\r\n\r\n**To Reproduce**\r\n<!--\r\nAdd a Minimal, Complete, and Verifiable example (for more details, see e.g. https://stackoverflow.com/help/mcve\r\n\r\nIf the code is too long, feel free to put it in a public gist and link it in the issue: https://gist.github.com\r\n--> See def create_shape_on_matrix, specifically lines 63-68 \r\n\r\n\r\n**Expected behavior**\r\n<!--\r\nA clear and concise description of what you expected to happen.\r\n--> In the else statement, I would expect ceil and floor to be exchanged\r\n\r\n\r\n**Versions**\r\n0.27.0\r\n\r\n<!--\r\nPlease run the following code snippet and paste the output here:\r\n\r\nfrom sktime import show_versions; show_versions()\r\n-->\r\nSystem:\r\n python: 3.12.0 (v3.12.0:0fb18b02c8, Oct 2 2023, 09:45:56) [Clang 13.0.0 (clang-1300.0.29.30)]\r\nexecutable: /path\r\n machine: macOS-14.4-arm64-arm-64bit\r\n\r\nPython dependencies:\r\n pip: 24.0\r\n sktime: 0.27.0\r\n sklearn: 1.4.1.post1\r\n skbase: 0.7.5\r\n numpy: 1.26.4\r\n scipy: 1.12.0\r\n pandas: 2.1.4\r\n matplotlib: 3.8.3\r\n joblib: 1.3.2\r\n numba: 0.59.0\r\n statsmodels: None\r\n pmdarima: None\r\nstatsforecast: None\r\n tsfresh: None\r\n tslearn: None\r\n torch: None\r\n tensorflow: None\r\ntensorflow_probability: None\r\nBackend MacOSX is interactive backend. Turning interactive mode on.\r\n\r\n</details>\r\n\r\n<!-- Thanks for contributing! -->\r\n\n", "before_files": [{"content": "\"\"\"Isolated numba imports for lower_bounding.\"\"\"\n\n__author__ = [\"chrisholder\", \"TonyBagnall\"]\n\nimport math\nfrom typing import Union\n\nimport numpy as np\n\nfrom sktime.utils.numba.njit import njit\n\n\n@njit(cache=True)\ndef create_shape_on_matrix(\n bounding_matrix: np.ndarray,\n y_upper_line: np.ndarray,\n y_lower_line: Union[np.ndarray, None] = None,\n x_step_size: int = 1,\n start_val: int = 0,\n) -> np.ndarray:\n \"\"\"Create a shape from a given upper line and lower line on a matrix.\n\n Parameters\n ----------\n bounding_matrix: np.ndarray (2d array)\n Matrix of size mxn where m is len(x) and n is len(y). 
Values that\n are inside the shape will be replaced with finite values (0.).\n y_upper_line: np.ndarray (1d array)\n Y points of the upper line.\n y_lower_line: np.ndarray (1d array), defaults = None\n Y points of the lower line. If no lower line specified, then y_upper_line\n used as lower line.\n x_step_size: int, defaults = 1\n Step size each iteration will increase by\n start_val: int, defaults = 0\n Starting coordinate for x\n\n Returns\n -------\n np.ndarray (2d array)\n Matrix with values of the shape set to 0. (finite), of the same shape\n as the passed bounding_matrix.\n \"\"\"\n y_size = bounding_matrix.shape[0]\n\n if y_lower_line is None:\n y_lower_line = y_upper_line\n\n upper_line_y_values = y_upper_line.shape[0]\n lower_line_y_values = y_lower_line.shape[0]\n\n if upper_line_y_values != lower_line_y_values:\n raise ValueError(\n \"The number of upper line values must equal the number of lower line \"\n \"values\"\n )\n\n half_way = math.floor(upper_line_y_values / 2)\n\n for i in range(start_val, upper_line_y_values):\n x = i * x_step_size\n\n if i > half_way:\n upper_y = max(0, min(y_size - 1, math.ceil(y_upper_line[i])))\n lower_y = max(0, min(y_size - 1, math.floor(y_lower_line[i])))\n else:\n upper_y = max(0, min(y_size - 1, math.ceil(y_upper_line[i])))\n lower_y = max(0, min(y_size - 1, math.floor(y_lower_line[i])))\n\n if upper_line_y_values == lower_line_y_values:\n if upper_y == lower_y:\n bounding_matrix[upper_y, x] = 0.0\n else:\n bounding_matrix[upper_y : (lower_y + 1), x] = 0.0\n else:\n bounding_matrix[upper_y, x] = 0.0\n bounding_matrix[lower_y, x] = 0.0\n\n return bounding_matrix\n\n\n@njit(cache=True)\ndef _check_line_steps(line: np.ndarray) -> np.ndarray:\n \"\"\"Check the next 'step' is along the line.\n\n Parameters\n ----------\n line: np.ndarray\n line to check steps.\n\n Returns\n -------\n np.ndarray\n Line with updated indexes.\n \"\"\"\n prev = line[0]\n for i in range(1, len(line)):\n curr_val = line[i]\n if curr_val > (prev + 1):\n line[i] = prev + 1\n elif curr_val < (prev - 1):\n line[i] = prev - 1\n prev = curr_val\n return line\n\n\n@njit(cache=True)\ndef no_bounding(x: np.ndarray, y: np.ndarray) -> np.ndarray:\n \"\"\"Create a matrix with no bounding.\n\n Parameters\n ----------\n x: np.ndarray (2d array)\n First time series.\n y: np.ndarray (2d array)\n Second time series.\n\n Returns\n -------\n np.ndarray (2d of size mxn where m is len(x) and n is len(y)).\n Bounding matrix where the values inside the bound are finite values (0s) and\n outside the bounds are infinity (non finite).\n \"\"\"\n return np.zeros((x.shape[1], y.shape[1]))\n\n\n@njit(cache=True)\ndef sakoe_chiba(x: np.ndarray, y: np.ndarray, window: float) -> np.ndarray:\n \"\"\"Create a sakoe chiba lower bounding window on a matrix.\n\n Parameters\n ----------\n x: np.ndarray (2d array)\n First time series.\n y: np.ndarray (2d array)\n Second time series.\n window: float\n Float that is the size of the window. 
Must be between 0 and 1.\n\n Returns\n -------\n np.ndarray (2d of size mxn where m is len(x) and n is len(y)).\n Sakoe Chiba bounding matrix where the values inside the bound are finite\n values (0s) and outside the bounds are infinity (non finite).\n\n Raises\n ------\n ValueError\n If the sakoe_chiba_window_radius is not an integer.\n \"\"\"\n if window < 0 or window > 1:\n raise ValueError(\"Window must between 0 and 1\")\n\n x_size = x.shape[1]\n y_size = y.shape[1]\n bounding_matrix = np.full((x_size, y_size), np.inf)\n sakoe_chiba_window_radius = ((x_size / 100) * window) * 100\n\n x_upper_line_values = np.interp(\n list(range(x_size)),\n [0, x_size - 1],\n [0 - sakoe_chiba_window_radius, y_size - sakoe_chiba_window_radius - 1],\n )\n x_lower_line_values = np.interp(\n list(range(x_size)),\n [0, x_size - 1],\n [0 + sakoe_chiba_window_radius, y_size + sakoe_chiba_window_radius - 1],\n )\n\n bounding_matrix = create_shape_on_matrix(\n bounding_matrix, x_upper_line_values, x_lower_line_values\n )\n\n return bounding_matrix\n\n\n@njit(cache=True)\ndef itakura_parallelogram(\n x: np.ndarray, y: np.ndarray, itakura_max_slope: float\n) -> np.ndarray:\n \"\"\"Create a itakura parallelogram bounding matrix.\n\n Parameters\n ----------\n x: np.ndarray (2d array)\n First time series.\n y: np.ndarray (2d array)\n Second time series.\n itakura_max_slope: float or int\n Gradient of the slope must be between 0 and 1.\n\n Returns\n -------\n np.ndarray (2d of size mxn where m is len(x) and n is len(y)).\n Sakoe Chiba bounding matrix where the values inside the bound are finite\n values (0s) and outside the bounds are infinity (non finite).\n\n Raises\n ------\n ValueError\n If the itakura_max_slope is not a float or int.\n \"\"\"\n if itakura_max_slope < 0 or itakura_max_slope > 1:\n raise ValueError(\"Window must between 0 and 1\")\n x_size = x.shape[1]\n y_size = y.shape[1]\n bounding_matrix = np.full((y_size, x_size), np.inf)\n itakura_max_slope = math.floor(((x_size / 100) * itakura_max_slope) * 100) / 2\n\n middle_x_upper = math.ceil(x_size / 2)\n middle_x_lower = math.floor(x_size / 2)\n if middle_x_lower == middle_x_upper:\n middle_x_lower = middle_x_lower - 1\n middle_y = math.floor(y_size / 2)\n\n difference_from_middle_y = abs((middle_x_lower * itakura_max_slope) - middle_y)\n middle_y_lower = middle_y + difference_from_middle_y\n middle_y_upper = middle_y - difference_from_middle_y\n\n x_upper_line_values = np.interp(\n list(range(x_size)),\n [0, middle_x_lower, middle_x_upper, x_size - 1],\n [0, middle_y_upper, middle_y_upper, y_size - 1],\n )\n x_lower_line_values = np.interp(\n list(range(x_size)),\n [0, middle_x_lower, middle_x_upper, x_size - 1],\n [0, middle_y_lower, middle_y_lower, y_size - 1],\n )\n\n if np.array_equal(x_upper_line_values, x_lower_line_values):\n x_upper_line_values = _check_line_steps(x_upper_line_values)\n\n bounding_matrix = create_shape_on_matrix(\n bounding_matrix, x_upper_line_values, x_lower_line_values\n )\n\n return bounding_matrix\n\n\n@njit(cache=True)\ndef numba_create_bounding_matrix(\n x: np.ndarray,\n y: np.ndarray,\n window: float = -1.0,\n itakura_max_slope: float = -1.0,\n) -> np.ndarray:\n \"\"\"Numba compiled way of creating bounding matrix.\n\n Parameters\n ----------\n x: np.ndarray (2d array)\n First time series.\n y: np.ndarray (2d array)\n Second time series.\n window: float, defaults = -1.\n Float that is the % radius of the sakoe chiba window (if using Sakoe-Chiba\n lower bounding). 
Must be between 0 and 1.\n itakura_max_slope: float, defaults = -1.\n Gradient of the slope for itakura parallelogram (if using Itakura\n Parallelogram lower bounding). Must be between 0 and 1.\n \"\"\"\n if window != -1.0:\n bounding_matrix = sakoe_chiba(x, y, window)\n elif itakura_max_slope != -1.0:\n bounding_matrix = itakura_parallelogram(x, y, itakura_max_slope)\n else:\n bounding_matrix = no_bounding(x, y)\n\n return bounding_matrix\n", "path": "sktime/distances/_lower_bounding_numba.py"}]} | 3,966 | 413 |
gh_patches_debug_6236 | rasdani/github-patches | git_diff | engnadeau__pybotics-18 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update examples
Examples are now out of sync with the current codebase. Potential use for IPython?
</issue>
<code>
[start of examples/example_robot.py]
1 import copy
2
3 import pybotics as py
4 import numpy as np
5
6 # set numpy print options
7 np.set_printoptions(precision=3)
8 np.set_printoptions(suppress=True)
9
10 # create robot
11 model = np.loadtxt('ur10-mdh.csv', delimiter=',')
12 robot = py.Robot(model)
13
14 print('Robot Model:\n{}\n'.format(robot.robot_model))
15
16 # demonstrate forward kinematics
17 joints = [0] * robot.num_dof()
18 pose = robot.fk(joints)
19
20 print('Pose:\n{}\n'.format(pose))
21
22 # demonstrate inverse kinematics
23 new_joints = robot.ik(pose)
24 print('Solved Joints:\n{}\n'.format(new_joints))
25
[end of examples/example_robot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/example_robot.py b/examples/example_robot.py
deleted file mode 100644
--- a/examples/example_robot.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import copy
-
-import pybotics as py
-import numpy as np
-
-# set numpy print options
-np.set_printoptions(precision=3)
-np.set_printoptions(suppress=True)
-
-# create robot
-model = np.loadtxt('ur10-mdh.csv', delimiter=',')
-robot = py.Robot(model)
-
-print('Robot Model:\n{}\n'.format(robot.robot_model))
-
-# demonstrate forward kinematics
-joints = [0] * robot.num_dof()
-pose = robot.fk(joints)
-
-print('Pose:\n{}\n'.format(pose))
-
-# demonstrate inverse kinematics
-new_joints = robot.ik(pose)
-print('Solved Joints:\n{}\n'.format(new_joints))
| {"golden_diff": "diff --git a/examples/example_robot.py b/examples/example_robot.py\ndeleted file mode 100644\n--- a/examples/example_robot.py\n+++ /dev/null\n@@ -1,24 +0,0 @@\n-import copy\n-\n-import pybotics as py\n-import numpy as np\n-\n-# set numpy print options\n-np.set_printoptions(precision=3)\n-np.set_printoptions(suppress=True)\n-\n-# create robot\n-model = np.loadtxt('ur10-mdh.csv', delimiter=',')\n-robot = py.Robot(model)\n-\n-print('Robot Model:\\n{}\\n'.format(robot.robot_model))\n-\n-# demonstrate forward kinematics\n-joints = [0] * robot.num_dof()\n-pose = robot.fk(joints)\n-\n-print('Pose:\\n{}\\n'.format(pose))\n-\n-# demonstrate inverse kinematics\n-new_joints = robot.ik(pose)\n-print('Solved Joints:\\n{}\\n'.format(new_joints))\n", "issue": "Update examples\nExamples are now out of sync with current codebase. Potential use for iPython?\n", "before_files": [{"content": "import copy\n\nimport pybotics as py\nimport numpy as np\n\n# set numpy print options\nnp.set_printoptions(precision=3)\nnp.set_printoptions(suppress=True)\n\n# create robot\nmodel = np.loadtxt('ur10-mdh.csv', delimiter=',')\nrobot = py.Robot(model)\n\nprint('Robot Model:\\n{}\\n'.format(robot.robot_model))\n\n# demonstrate forward kinematics\njoints = [0] * robot.num_dof()\npose = robot.fk(joints)\n\nprint('Pose:\\n{}\\n'.format(pose))\n\n# demonstrate inverse kinematics\nnew_joints = robot.ik(pose)\nprint('Solved Joints:\\n{}\\n'.format(new_joints))\n", "path": "examples/example_robot.py"}]} | 749 | 212 |
gh_patches_debug_10739 | rasdani/github-patches | git_diff | easybuilders__easybuild-easyblocks-2326 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
R package modules have priority over user-installed packages
R has multiple variables that control the library path, `R_LIBS`, `R_LIBS_USER` and `R_LIBS_SITE`.
The path can be inspected with `.libPaths()` and is made up, in order, of the entries in `R_LIBS`, `R_LIBS_USER` and `R_LIBS_SITE`, and finally the library of the R installation itself.
Our R package modules currently prepend their path to `R_LIBS` similar to how `PYTHONPATH` is handled, but this means that the user's directory (typically specified in `~/.Renviron` or the default) is not at the front of the library path.
This means that users cannot override system-installed packages and, more importantly, `install.packages` only ever considers the first entry in the library path unless you explicitly specify a `lib=` to install to. Some installation functions, like `install_github`, don't even have that customization point.
I suggest that we alter our R packages to prepend to `R_LIBS_SITE` instead, as it seems to yield the preferred order for an end-user.
Our R installations do not have a bundled `site-library` directory so the environment variable is currently unused.
Upstream documentation on how the paths are constructed: https://stat.ethz.ch/R-manual/R-devel/library/base/html/libPaths.html
</issue>
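The golden diff for this row is not shown here, so the snippet below only illustrates the suggestion rather than the fix that was merged: in the `RPackage` easyblock quoted below, the change would be confined to `make_module_extra`, swapping the environment variable name passed to `prepend_paths`.

```python
def make_module_extra(self):
    """Add install path to R_LIBS_SITE (sketch of the proposed change)."""
    # Prepending to R_LIBS_SITE keeps user libraries (R_LIBS / R_LIBS_USER, e.g. set via
    # ~/.Renviron) ahead of module-provided packages in .libPaths(), so users can override
    # site packages and install.packages() targets their own library by default.
    extra = self.module_generator.prepend_paths("R_LIBS_SITE", [self.cfg['exts_subdir']])
    return super(RPackage, self).make_module_extra(extra)
```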
<code>
[start of easybuild/easyblocks/generic/rpackage.py]
1 ##
2 # Copyright 2009-2021 Ghent University
3 #
4 # This file is part of EasyBuild,
5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
6 # with support of Ghent University (http://ugent.be/hpc),
7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en)
9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
10 #
11 # https://github.com/easybuilders/easybuild
12 #
13 # EasyBuild is free software: you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation v2.
16 #
17 # EasyBuild is distributed in the hope that it will be useful,
18 # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 # GNU General Public License for more details.
21 #
22 # You should have received a copy of the GNU General Public License
23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
24 ##
25 """
26 EasyBuild support for building and installing R packages, implemented as an easyblock
27
28 @author: Stijn De Weirdt (Ghent University)
29 @author: Dries Verdegem (Ghent University)
30 @author: Kenneth Hoste (Ghent University)
31 @author: Jens Timmerman (Ghent University)
32 @author: Toon Willems (Ghent University)
33 @author: Balazs Hajgato (Vrije Universiteit Brussel)
34 """
35 import os
36
37 from easybuild.easyblocks.r import EXTS_FILTER_R_PACKAGES, EB_R
38 from easybuild.easyblocks.generic.configuremake import check_config_guess, obtain_config_guess
39 from easybuild.framework.easyconfig import CUSTOM
40 from easybuild.framework.extensioneasyblock import ExtensionEasyBlock
41 from easybuild.tools.build_log import EasyBuildError
42 from easybuild.tools.filetools import mkdir, copy_file
43 from easybuild.tools.run import run_cmd, parse_log_for_error
44
45
46 def make_R_install_option(opt, values, cmdline=False):
47 """
48 Make option list for install.packages, to specify in R environment.
49 """
50 txt = ""
51 if values:
52 if cmdline:
53 txt = " --%s=\"%s" % (opt, values[0])
54 else:
55 txt = "%s=c(\"%s" % (opt, values[0])
56 for i in values[1:]:
57 txt += " %s" % i
58 if cmdline:
59 txt += "\""
60 else:
61 txt += "\")"
62 return txt
63
64
65 class RPackage(ExtensionEasyBlock):
66 """
67 Install an R package as a separate module, or as an extension.
68 """
69
70 @staticmethod
71 def extra_options(extra_vars=None):
72 """Extra easyconfig parameters specific to RPackage."""
73 extra_vars = ExtensionEasyBlock.extra_options(extra_vars=extra_vars)
74 extra_vars.update({
75 'exts_subdir': ['', "Subdirectory where R extensions should be installed info", CUSTOM],
76 'unpack_sources': [False, "Unpack sources before installation", CUSTOM],
77 })
78 return extra_vars
79
80 def __init__(self, *args, **kwargs):
81 """Initliaze RPackage-specific class variables."""
82
83 super(RPackage, self).__init__(*args, **kwargs)
84
85 self.configurevars = []
86 self.configureargs = []
87 self.ext_src = None
88
89 def make_r_cmd(self, prefix=None):
90 """Create a command to run in R to install an R package."""
91 confvars = "confvars"
92 confargs = "confargs"
93 confvarslist = make_R_install_option(confvars, self.configurevars)
94 confargslist = make_R_install_option(confargs, self.configureargs)
95 confvarsstr = ""
96 if confvarslist:
97 confvarslist = confvarslist + "; names(%s)=\"%s\"" % (confvars, self.name)
98 confvarsstr = ", configure.vars=%s" % confvars
99 confargsstr = ""
100 if confargslist:
101 confargslist = confargslist + "; names(%s)=\"%s\"" % (confargs, self.name)
102 confargsstr = ", configure.args=%s" % confargs
103
104 if prefix:
105 prefix = '"%s", ' % prefix
106 else:
107 prefix = ''
108
109 r_cmd = """
110 options(repos=c(CRAN="http://www.freestatistics.org/cran"))
111 %s
112 %s
113 install.packages("%s", %s dependencies = FALSE %s%s)
114 """ % (confvarslist, confargslist, self.name, prefix, confvarsstr, confargsstr)
115 cmd = "%s R -q --no-save %s" % (self.cfg['preinstallopts'], self.cfg['installopts'])
116
117 self.log.debug("make_r_cmd returns %s with input %s" % (cmd, r_cmd))
118
119 return (cmd, r_cmd)
120
121 def make_cmdline_cmd(self, prefix=None):
122 """Create a command line to install an R package."""
123 confvars = ""
124 if self.configurevars:
125 confvars = make_R_install_option("configure-vars", self.configurevars, cmdline=True)
126 confargs = ""
127 if self.configureargs:
128 confargs = make_R_install_option("configure-args", self.configureargs, cmdline=True)
129
130 if prefix:
131 prefix = '--library=%s' % prefix
132 else:
133 prefix = ''
134
135 if self.start_dir:
136 loc = os.path.join(self.ext_dir or os.path.sep, self.start_dir)
137 else:
138 loc = self.ext_dir or self.ext_src
139
140 cmd = ' '.join([
141 self.cfg['preinstallopts'],
142 "R CMD INSTALL",
143 loc,
144 confargs,
145 confvars,
146 prefix,
147 '--no-clean-on-error',
148 self.cfg['installopts'],
149 ])
150
151 self.log.debug("make_cmdline_cmd returns %s" % cmd)
152 return cmd, None
153
154 def configure_step(self):
155 """No configuration for installing R packages."""
156 pass
157
158 def build_step(self):
159 """No separate build step for R packages."""
160 pass
161
162 def install_R_package(self, cmd, inp=None):
163 """Install R package as specified, and check for errors."""
164
165 cmdttdouterr, _ = run_cmd(cmd, log_all=True, simple=False, inp=inp, regexp=False)
166
167 cmderrors = parse_log_for_error(cmdttdouterr, regExp="^ERROR:")
168 if cmderrors:
169 cmd = "R -q --no-save"
170 stdin = """
171 remove.library(%s)
172 """ % self.name
173 # remove package if errors were detected
174 # it's possible that some of the dependencies failed, but the package itself was installed
175 run_cmd(cmd, log_all=False, log_ok=False, simple=False, inp=stdin, regexp=False)
176 raise EasyBuildError("Errors detected during installation of R package %s!", self.name)
177 else:
178 self.log.debug("R package %s installed succesfully" % self.name)
179
180 def update_config_guess(self, path):
181 """Update any config.guess found in specified directory"""
182 for config_guess_dir in (root for root, _, files in os.walk(path) if 'config.guess' in files):
183 config_guess = os.path.join(config_guess_dir, 'config.guess')
184 if not check_config_guess(config_guess):
185 updated_config_guess = obtain_config_guess()
186 if updated_config_guess:
187 self.log.debug("Replacing outdated %s with more recent %s", config_guess, updated_config_guess)
188 copy_file(updated_config_guess, config_guess)
189 else:
190 raise EasyBuildError("Failed to obtain updated config.guess")
191
192 def install_step(self):
193 """Install procedure for R packages."""
194 # Update config.guess if the package was extracted
195 if self.start_dir:
196 self.update_config_guess(self.start_dir)
197 cmd, stdin = self.make_cmdline_cmd(prefix=os.path.join(self.installdir, self.cfg['exts_subdir']))
198 self.install_R_package(cmd, inp=stdin)
199
200 def run(self):
201 """Install R package as an extension."""
202
203 # determine location
204 if isinstance(self.master, EB_R):
205 # extension is being installed as part of an R installation/module
206 (out, _) = run_cmd("R RHOME", log_all=True, simple=False)
207 rhome = out.strip()
208 lib_install_prefix = os.path.join(rhome, 'library')
209 else:
210 # extension is being installed in a separate installation prefix
211 lib_install_prefix = os.path.join(self.installdir, self.cfg['exts_subdir'])
212 mkdir(lib_install_prefix, parents=True)
213
214 if self.src:
215 super(RPackage, self).run(unpack_src=True)
216 self.ext_src = self.src
217 self.update_config_guess(self.ext_dir)
218 self.log.debug("Installing R package %s version %s." % (self.name, self.version))
219 cmd, stdin = self.make_cmdline_cmd(prefix=lib_install_prefix)
220 else:
221 if self.patches:
222 raise EasyBuildError("Cannot patch R package %s as no explicit source is given!", self.name)
223 self.log.debug("Installing most recent version of R package %s (source not found)." % self.name)
224 cmd, stdin = self.make_r_cmd(prefix=lib_install_prefix)
225
226 self.install_R_package(cmd, inp=stdin)
227
228 def sanity_check_step(self, *args, **kwargs):
229 """
230 Custom sanity check for R packages
231 """
232 return super(RPackage, self).sanity_check_step(EXTS_FILTER_R_PACKAGES, *args, **kwargs)
233
234 def make_module_extra(self):
235 """Add install path to R_LIBS"""
236 # prepend R_LIBS with install path
237 extra = self.module_generator.prepend_paths("R_LIBS", [self.cfg['exts_subdir']])
238 return super(RPackage, self).make_module_extra(extra)
239
[end of easybuild/easyblocks/generic/rpackage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/easybuild/easyblocks/generic/rpackage.py b/easybuild/easyblocks/generic/rpackage.py
--- a/easybuild/easyblocks/generic/rpackage.py
+++ b/easybuild/easyblocks/generic/rpackage.py
@@ -232,7 +232,7 @@
return super(RPackage, self).sanity_check_step(EXTS_FILTER_R_PACKAGES, *args, **kwargs)
def make_module_extra(self):
- """Add install path to R_LIBS"""
- # prepend R_LIBS with install path
- extra = self.module_generator.prepend_paths("R_LIBS", [self.cfg['exts_subdir']])
+ """Add install path to R_LIBS_SITE"""
+ # prepend R_LIBS_SITE with install path
+ extra = self.module_generator.prepend_paths("R_LIBS_SITE", [self.cfg['exts_subdir']])
return super(RPackage, self).make_module_extra(extra)
| {"golden_diff": "diff --git a/easybuild/easyblocks/generic/rpackage.py b/easybuild/easyblocks/generic/rpackage.py\n--- a/easybuild/easyblocks/generic/rpackage.py\n+++ b/easybuild/easyblocks/generic/rpackage.py\n@@ -232,7 +232,7 @@\n return super(RPackage, self).sanity_check_step(EXTS_FILTER_R_PACKAGES, *args, **kwargs)\n \n def make_module_extra(self):\n- \"\"\"Add install path to R_LIBS\"\"\"\n- # prepend R_LIBS with install path\n- extra = self.module_generator.prepend_paths(\"R_LIBS\", [self.cfg['exts_subdir']])\n+ \"\"\"Add install path to R_LIBS_SITE\"\"\"\n+ # prepend R_LIBS_SITE with install path\n+ extra = self.module_generator.prepend_paths(\"R_LIBS_SITE\", [self.cfg['exts_subdir']])\n return super(RPackage, self).make_module_extra(extra)\n", "issue": "R package modules have priority over user-installed packages\nR has multiple variables that control the library path, `R_LIBS`, `R_LIBS_USER` and `R_LIBS_SITE`.\r\n\r\nThe path can be inspected with `.libPaths()` and is made up of in order of the entries in `R_LIBS`, `R_LIBS_USER` and `R_LIBS_SITE` and last the R installation itself.\r\n\r\nOur R package modules currently prepend their path to `R_LIBS` similar to how `PYTHONPATH` is handled, but this means that the user's directory (typically specified in `~/.Renviron` or the default) is not at the front of the library path.\r\n\r\nThis means that users cannot override system-installed packages and more importantly, `install.packages` only ever considers the first entry in the library path unless you explicitly specify a `lib=` to install to. Some installation functions like `install_github` doesn't even have that customization point.\r\n\r\nI suggest that we alter our R packages to prepend to `R_LIBS_SITE` instead, it seems to yield the preferred order for an end-user.\r\n\r\nOur R installations do not have a bundled `site-library` directory so the environment variable is currently unused.\r\n\r\nUpstream documentation on how the paths are constructed: https://stat.ethz.ch/R-manual/R-devel/library/base/html/libPaths.html\n", "before_files": [{"content": "##\n# Copyright 2009-2021 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. 
If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for building and installing R packages, implemented as an easyblock\n\n@author: Stijn De Weirdt (Ghent University)\n@author: Dries Verdegem (Ghent University)\n@author: Kenneth Hoste (Ghent University)\n@author: Jens Timmerman (Ghent University)\n@author: Toon Willems (Ghent University)\n@author: Balazs Hajgato (Vrije Universiteit Brussel)\n\"\"\"\nimport os\n\nfrom easybuild.easyblocks.r import EXTS_FILTER_R_PACKAGES, EB_R\nfrom easybuild.easyblocks.generic.configuremake import check_config_guess, obtain_config_guess\nfrom easybuild.framework.easyconfig import CUSTOM\nfrom easybuild.framework.extensioneasyblock import ExtensionEasyBlock\nfrom easybuild.tools.build_log import EasyBuildError\nfrom easybuild.tools.filetools import mkdir, copy_file\nfrom easybuild.tools.run import run_cmd, parse_log_for_error\n\n\ndef make_R_install_option(opt, values, cmdline=False):\n \"\"\"\n Make option list for install.packages, to specify in R environment.\n \"\"\"\n txt = \"\"\n if values:\n if cmdline:\n txt = \" --%s=\\\"%s\" % (opt, values[0])\n else:\n txt = \"%s=c(\\\"%s\" % (opt, values[0])\n for i in values[1:]:\n txt += \" %s\" % i\n if cmdline:\n txt += \"\\\"\"\n else:\n txt += \"\\\")\"\n return txt\n\n\nclass RPackage(ExtensionEasyBlock):\n \"\"\"\n Install an R package as a separate module, or as an extension.\n \"\"\"\n\n @staticmethod\n def extra_options(extra_vars=None):\n \"\"\"Extra easyconfig parameters specific to RPackage.\"\"\"\n extra_vars = ExtensionEasyBlock.extra_options(extra_vars=extra_vars)\n extra_vars.update({\n 'exts_subdir': ['', \"Subdirectory where R extensions should be installed info\", CUSTOM],\n 'unpack_sources': [False, \"Unpack sources before installation\", CUSTOM],\n })\n return extra_vars\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initliaze RPackage-specific class variables.\"\"\"\n\n super(RPackage, self).__init__(*args, **kwargs)\n\n self.configurevars = []\n self.configureargs = []\n self.ext_src = None\n\n def make_r_cmd(self, prefix=None):\n \"\"\"Create a command to run in R to install an R package.\"\"\"\n confvars = \"confvars\"\n confargs = \"confargs\"\n confvarslist = make_R_install_option(confvars, self.configurevars)\n confargslist = make_R_install_option(confargs, self.configureargs)\n confvarsstr = \"\"\n if confvarslist:\n confvarslist = confvarslist + \"; names(%s)=\\\"%s\\\"\" % (confvars, self.name)\n confvarsstr = \", configure.vars=%s\" % confvars\n confargsstr = \"\"\n if confargslist:\n confargslist = confargslist + \"; names(%s)=\\\"%s\\\"\" % (confargs, self.name)\n confargsstr = \", configure.args=%s\" % confargs\n\n if prefix:\n prefix = '\"%s\", ' % prefix\n else:\n prefix = ''\n\n r_cmd = \"\"\"\n options(repos=c(CRAN=\"http://www.freestatistics.org/cran\"))\n %s\n %s\n install.packages(\"%s\", %s dependencies = FALSE %s%s)\n \"\"\" % (confvarslist, confargslist, self.name, prefix, confvarsstr, confargsstr)\n cmd = \"%s R -q --no-save %s\" % (self.cfg['preinstallopts'], self.cfg['installopts'])\n\n self.log.debug(\"make_r_cmd returns %s with input %s\" % (cmd, r_cmd))\n\n return (cmd, r_cmd)\n\n def make_cmdline_cmd(self, prefix=None):\n \"\"\"Create a command line to install an R package.\"\"\"\n confvars = \"\"\n if self.configurevars:\n confvars = make_R_install_option(\"configure-vars\", self.configurevars, cmdline=True)\n confargs = \"\"\n if self.configureargs:\n confargs = make_R_install_option(\"configure-args\", self.configureargs, 
cmdline=True)\n\n if prefix:\n prefix = '--library=%s' % prefix\n else:\n prefix = ''\n\n if self.start_dir:\n loc = os.path.join(self.ext_dir or os.path.sep, self.start_dir)\n else:\n loc = self.ext_dir or self.ext_src\n\n cmd = ' '.join([\n self.cfg['preinstallopts'],\n \"R CMD INSTALL\",\n loc,\n confargs,\n confvars,\n prefix,\n '--no-clean-on-error',\n self.cfg['installopts'],\n ])\n\n self.log.debug(\"make_cmdline_cmd returns %s\" % cmd)\n return cmd, None\n\n def configure_step(self):\n \"\"\"No configuration for installing R packages.\"\"\"\n pass\n\n def build_step(self):\n \"\"\"No separate build step for R packages.\"\"\"\n pass\n\n def install_R_package(self, cmd, inp=None):\n \"\"\"Install R package as specified, and check for errors.\"\"\"\n\n cmdttdouterr, _ = run_cmd(cmd, log_all=True, simple=False, inp=inp, regexp=False)\n\n cmderrors = parse_log_for_error(cmdttdouterr, regExp=\"^ERROR:\")\n if cmderrors:\n cmd = \"R -q --no-save\"\n stdin = \"\"\"\n remove.library(%s)\n \"\"\" % self.name\n # remove package if errors were detected\n # it's possible that some of the dependencies failed, but the package itself was installed\n run_cmd(cmd, log_all=False, log_ok=False, simple=False, inp=stdin, regexp=False)\n raise EasyBuildError(\"Errors detected during installation of R package %s!\", self.name)\n else:\n self.log.debug(\"R package %s installed succesfully\" % self.name)\n\n def update_config_guess(self, path):\n \"\"\"Update any config.guess found in specified directory\"\"\"\n for config_guess_dir in (root for root, _, files in os.walk(path) if 'config.guess' in files):\n config_guess = os.path.join(config_guess_dir, 'config.guess')\n if not check_config_guess(config_guess):\n updated_config_guess = obtain_config_guess()\n if updated_config_guess:\n self.log.debug(\"Replacing outdated %s with more recent %s\", config_guess, updated_config_guess)\n copy_file(updated_config_guess, config_guess)\n else:\n raise EasyBuildError(\"Failed to obtain updated config.guess\")\n\n def install_step(self):\n \"\"\"Install procedure for R packages.\"\"\"\n # Update config.guess if the package was extracted\n if self.start_dir:\n self.update_config_guess(self.start_dir)\n cmd, stdin = self.make_cmdline_cmd(prefix=os.path.join(self.installdir, self.cfg['exts_subdir']))\n self.install_R_package(cmd, inp=stdin)\n\n def run(self):\n \"\"\"Install R package as an extension.\"\"\"\n\n # determine location\n if isinstance(self.master, EB_R):\n # extension is being installed as part of an R installation/module\n (out, _) = run_cmd(\"R RHOME\", log_all=True, simple=False)\n rhome = out.strip()\n lib_install_prefix = os.path.join(rhome, 'library')\n else:\n # extension is being installed in a separate installation prefix\n lib_install_prefix = os.path.join(self.installdir, self.cfg['exts_subdir'])\n mkdir(lib_install_prefix, parents=True)\n\n if self.src:\n super(RPackage, self).run(unpack_src=True)\n self.ext_src = self.src\n self.update_config_guess(self.ext_dir)\n self.log.debug(\"Installing R package %s version %s.\" % (self.name, self.version))\n cmd, stdin = self.make_cmdline_cmd(prefix=lib_install_prefix)\n else:\n if self.patches:\n raise EasyBuildError(\"Cannot patch R package %s as no explicit source is given!\", self.name)\n self.log.debug(\"Installing most recent version of R package %s (source not found).\" % self.name)\n cmd, stdin = self.make_r_cmd(prefix=lib_install_prefix)\n\n self.install_R_package(cmd, inp=stdin)\n\n def sanity_check_step(self, *args, **kwargs):\n \"\"\"\n 
Custom sanity check for R packages\n \"\"\"\n return super(RPackage, self).sanity_check_step(EXTS_FILTER_R_PACKAGES, *args, **kwargs)\n\n def make_module_extra(self):\n \"\"\"Add install path to R_LIBS\"\"\"\n # prepend R_LIBS with install path\n extra = self.module_generator.prepend_paths(\"R_LIBS\", [self.cfg['exts_subdir']])\n return super(RPackage, self).make_module_extra(extra)\n", "path": "easybuild/easyblocks/generic/rpackage.py"}]} | 3,624 | 210 |
gh_patches_debug_31739 | rasdani/github-patches | git_diff | streamlink__streamlink-1863 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove 9anime.to
As discussed over here: https://github.com/streamlink/streamlink/issues/1110#issuecomment-400687075, 9anime.to isn't worth supporting at this point and is broken, so I'm proposing we remove it.
</issue>
<code>
[start of src/streamlink/plugins/nineanime.py]
1 import re
2 from streamlink.plugin import Plugin
3 from streamlink.plugin.api import http
4 from streamlink.plugin.api import useragents
5 from streamlink.plugin.api import validate
6 from streamlink.stream import HTTPStream
7 from streamlink.compat import urlparse
8
9
10 class NineAnime(Plugin):
11 _episode_info_url = "//9anime.to/ajax/episode/info"
12
13 _info_schema = validate.Schema({
14 "grabber": validate.url(),
15 "params": {
16 "id": validate.text,
17 "token": validate.text,
18 "options": validate.text,
19 }
20 })
21
22 _streams_schema = validate.Schema({
23 "token": validate.text,
24 "error": None,
25 "data": [{
26 "label": validate.text,
27 "file": validate.url(),
28 "type": "mp4"
29 }]
30 })
31
32 _url_re = re.compile(r"https?://9anime.to/watch/(?:[^.]+?\.)(\w+)/(\w+)")
33
34 @classmethod
35 def can_handle_url(cls, url):
36 return cls._url_re.match(url) is not None
37
38 def add_scheme(self, url):
39 # update the scheme for the grabber url if required
40 if url.startswith("//"):
41 url = "{0}:{1}".format(urlparse(self.url).scheme, url)
42 return url
43
44 @Plugin.broken(1110)
45 def _get_streams(self):
46 match = self._url_re.match(self.url)
47 film_id, episode_id = match.groups()
48
49 headers = {
50 "Referer": self.url,
51 "User-Agent": useragents.FIREFOX
52 }
53
54 # Get the info about the Episode, including the Grabber API URL
55 info_res = http.get(self.add_scheme(self._episode_info_url),
56 params=dict(update=0, film=film_id, id=episode_id),
57 headers=headers)
58 info = http.json(info_res, schema=self._info_schema)
59
60 # Get the data about the streams from the Grabber API
61 grabber_url = self.add_scheme(info["grabber"])
62 stream_list_res = http.get(grabber_url, params=info["params"], headers=headers)
63 stream_data = http.json(stream_list_res, schema=self._streams_schema)
64
65 for stream in stream_data["data"]:
66 yield stream["label"], HTTPStream(self.session, stream["file"])
67
68
69 __plugin__ = NineAnime
70
[end of src/streamlink/plugins/nineanime.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/nineanime.py b/src/streamlink/plugins/nineanime.py
--- a/src/streamlink/plugins/nineanime.py
+++ b/src/streamlink/plugins/nineanime.py
@@ -1,69 +1 @@
-import re
-from streamlink.plugin import Plugin
-from streamlink.plugin.api import http
-from streamlink.plugin.api import useragents
-from streamlink.plugin.api import validate
-from streamlink.stream import HTTPStream
-from streamlink.compat import urlparse
-
-
-class NineAnime(Plugin):
- _episode_info_url = "//9anime.to/ajax/episode/info"
-
- _info_schema = validate.Schema({
- "grabber": validate.url(),
- "params": {
- "id": validate.text,
- "token": validate.text,
- "options": validate.text,
- }
- })
-
- _streams_schema = validate.Schema({
- "token": validate.text,
- "error": None,
- "data": [{
- "label": validate.text,
- "file": validate.url(),
- "type": "mp4"
- }]
- })
-
- _url_re = re.compile(r"https?://9anime.to/watch/(?:[^.]+?\.)(\w+)/(\w+)")
-
- @classmethod
- def can_handle_url(cls, url):
- return cls._url_re.match(url) is not None
-
- def add_scheme(self, url):
- # update the scheme for the grabber url if required
- if url.startswith("//"):
- url = "{0}:{1}".format(urlparse(self.url).scheme, url)
- return url
-
- @Plugin.broken(1110)
- def _get_streams(self):
- match = self._url_re.match(self.url)
- film_id, episode_id = match.groups()
-
- headers = {
- "Referer": self.url,
- "User-Agent": useragents.FIREFOX
- }
-
- # Get the info about the Episode, including the Grabber API URL
- info_res = http.get(self.add_scheme(self._episode_info_url),
- params=dict(update=0, film=film_id, id=episode_id),
- headers=headers)
- info = http.json(info_res, schema=self._info_schema)
-
- # Get the data about the streams from the Grabber API
- grabber_url = self.add_scheme(info["grabber"])
- stream_list_res = http.get(grabber_url, params=info["params"], headers=headers)
- stream_data = http.json(stream_list_res, schema=self._streams_schema)
-
- for stream in stream_data["data"]:
- yield stream["label"], HTTPStream(self.session, stream["file"])
-
-
-__plugin__ = NineAnime
+# Plugin removed - https://github.com/streamlink/streamlink/issues/1862
| {"golden_diff": "diff --git a/src/streamlink/plugins/nineanime.py b/src/streamlink/plugins/nineanime.py\n--- a/src/streamlink/plugins/nineanime.py\n+++ b/src/streamlink/plugins/nineanime.py\n@@ -1,69 +1 @@\n-import re\n-from streamlink.plugin import Plugin\n-from streamlink.plugin.api import http\n-from streamlink.plugin.api import useragents\n-from streamlink.plugin.api import validate\n-from streamlink.stream import HTTPStream\n-from streamlink.compat import urlparse\n-\n-\n-class NineAnime(Plugin):\n- _episode_info_url = \"//9anime.to/ajax/episode/info\"\n-\n- _info_schema = validate.Schema({\n- \"grabber\": validate.url(),\n- \"params\": {\n- \"id\": validate.text,\n- \"token\": validate.text,\n- \"options\": validate.text,\n- }\n- })\n-\n- _streams_schema = validate.Schema({\n- \"token\": validate.text,\n- \"error\": None,\n- \"data\": [{\n- \"label\": validate.text,\n- \"file\": validate.url(),\n- \"type\": \"mp4\"\n- }]\n- })\n-\n- _url_re = re.compile(r\"https?://9anime.to/watch/(?:[^.]+?\\.)(\\w+)/(\\w+)\")\n-\n- @classmethod\n- def can_handle_url(cls, url):\n- return cls._url_re.match(url) is not None\n-\n- def add_scheme(self, url):\n- # update the scheme for the grabber url if required\n- if url.startswith(\"//\"):\n- url = \"{0}:{1}\".format(urlparse(self.url).scheme, url)\n- return url\n-\n- @Plugin.broken(1110)\n- def _get_streams(self):\n- match = self._url_re.match(self.url)\n- film_id, episode_id = match.groups()\n-\n- headers = {\n- \"Referer\": self.url,\n- \"User-Agent\": useragents.FIREFOX\n- }\n-\n- # Get the info about the Episode, including the Grabber API URL\n- info_res = http.get(self.add_scheme(self._episode_info_url),\n- params=dict(update=0, film=film_id, id=episode_id),\n- headers=headers)\n- info = http.json(info_res, schema=self._info_schema)\n-\n- # Get the data about the streams from the Grabber API\n- grabber_url = self.add_scheme(info[\"grabber\"])\n- stream_list_res = http.get(grabber_url, params=info[\"params\"], headers=headers)\n- stream_data = http.json(stream_list_res, schema=self._streams_schema)\n-\n- for stream in stream_data[\"data\"]:\n- yield stream[\"label\"], HTTPStream(self.session, stream[\"file\"])\n-\n-\n-__plugin__ = NineAnime\n+# Plugin removed - https://github.com/streamlink/streamlink/issues/1862\n", "issue": "Remove 9anime.to\nAs discussed over here: https://github.com/streamlink/streamlink/issues/1110#issuecomment-400687075 9anime.to isn't worth supporting at this point and is broken so I'm proposing we remove it.\r\n\n", "before_files": [{"content": "import re\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.plugin.api import useragents\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HTTPStream\nfrom streamlink.compat import urlparse\n\n\nclass NineAnime(Plugin):\n _episode_info_url = \"//9anime.to/ajax/episode/info\"\n\n _info_schema = validate.Schema({\n \"grabber\": validate.url(),\n \"params\": {\n \"id\": validate.text,\n \"token\": validate.text,\n \"options\": validate.text,\n }\n })\n\n _streams_schema = validate.Schema({\n \"token\": validate.text,\n \"error\": None,\n \"data\": [{\n \"label\": validate.text,\n \"file\": validate.url(),\n \"type\": \"mp4\"\n }]\n })\n\n _url_re = re.compile(r\"https?://9anime.to/watch/(?:[^.]+?\\.)(\\w+)/(\\w+)\")\n\n @classmethod\n def can_handle_url(cls, url):\n return cls._url_re.match(url) is not None\n\n def add_scheme(self, url):\n # update the scheme for the grabber url if required\n if 
url.startswith(\"//\"):\n url = \"{0}:{1}\".format(urlparse(self.url).scheme, url)\n return url\n\n @Plugin.broken(1110)\n def _get_streams(self):\n match = self._url_re.match(self.url)\n film_id, episode_id = match.groups()\n\n headers = {\n \"Referer\": self.url,\n \"User-Agent\": useragents.FIREFOX\n }\n\n # Get the info about the Episode, including the Grabber API URL\n info_res = http.get(self.add_scheme(self._episode_info_url),\n params=dict(update=0, film=film_id, id=episode_id),\n headers=headers)\n info = http.json(info_res, schema=self._info_schema)\n\n # Get the data about the streams from the Grabber API\n grabber_url = self.add_scheme(info[\"grabber\"])\n stream_list_res = http.get(grabber_url, params=info[\"params\"], headers=headers)\n stream_data = http.json(stream_list_res, schema=self._streams_schema)\n\n for stream in stream_data[\"data\"]:\n yield stream[\"label\"], HTTPStream(self.session, stream[\"file\"])\n\n\n__plugin__ = NineAnime\n", "path": "src/streamlink/plugins/nineanime.py"}]} | 1,247 | 632 |
gh_patches_debug_8550 | rasdani/github-patches | git_diff | zulip__zulip-12755 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
portico/docs: Improve OpenAPI descriptions and titles.
Followups to https://github.com/zulip/zulip/pull/11045.
- [ ] Currently we only use the first paragraph of text for the description. It would be better to concatenate the first few paragraphs, and truncate. Basically it's just using bs.find_all('p') and joining things together in a loop until it gets too long.
One catch is that we probably want to stop at the first non-paragraph block so we don't cross a list or something (see the sketch after this list).
- [ ] Currently, we've just tested our OpenAPI things with /api and /help. We should extend to the /integrations pages, and also figure out what we want to do for the top-10 major portico pages (especially /features, /why-zulip, and /for/open-source), which could benefit from a similar treatment.
To work on this I would start by skimming the code from #11045.
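For reference, a rough sketch of the concatenate-and-truncate idea, assuming BeautifulSoup as in the existing code; the function name, the 1000-character cap, and the exact stopping rule are assumptions, not the final implementation:
```python
# Hypothetical sketch only -- the name and the length cap are made up.
from bs4 import BeautifulSoup

def open_graph_description(html, max_len=1000):
    soup = BeautifulSoup(html, "html.parser")
    pieces = []
    node = soup.find("p")
    # Walk consecutive <p> siblings; stop at the first non-paragraph block
    # so we never cross into a list or code sample.
    while node is not None and node.name == "p":
        pieces.append(node.get_text(" ", strip=True))
        if sum(len(piece) for piece in pieces) >= max_len:
            break
        node = node.find_next_sibling()
    return " ".join(pieces)[:max_len]
```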
</issue>
<code>
[start of zerver/views/documentation.py]
1 from typing import Any, Dict, Tuple
2 from collections import OrderedDict
3 from django.views.generic import TemplateView
4 from django.conf import settings
5 from django.http import HttpRequest, HttpResponse, HttpResponseNotFound
6 from django.template import loader
7
8 import os
9 import random
10 import re
11
12 from zerver.lib.integrations import CATEGORIES, INTEGRATIONS, HubotIntegration, \
13 WebhookIntegration, EmailIntegration
14 from zerver.lib.request import has_request_variables, REQ
15 from zerver.lib.subdomains import get_subdomain
16 from zerver.models import Realm
17 from zerver.templatetags.app_filters import render_markdown_path
18 from zerver.context_processors import zulip_default_context
19
20 def add_api_uri_context(context: Dict[str, Any], request: HttpRequest) -> None:
21 context.update(zulip_default_context(request))
22
23 subdomain = get_subdomain(request)
24 if (subdomain != Realm.SUBDOMAIN_FOR_ROOT_DOMAIN
25 or not settings.ROOT_DOMAIN_LANDING_PAGE):
26 display_subdomain = subdomain
27 html_settings_links = True
28 else:
29 display_subdomain = 'yourZulipDomain'
30 html_settings_links = False
31
32 display_host = Realm.host_for_subdomain(display_subdomain)
33 api_url_scheme_relative = display_host + "/api"
34 api_url = settings.EXTERNAL_URI_SCHEME + api_url_scheme_relative
35 zulip_url = settings.EXTERNAL_URI_SCHEME + display_host
36
37 context['external_uri_scheme'] = settings.EXTERNAL_URI_SCHEME
38 context['api_url'] = api_url
39 context['api_url_scheme_relative'] = api_url_scheme_relative
40 context['zulip_url'] = zulip_url
41
42 context["html_settings_links"] = html_settings_links
43 if html_settings_links:
44 settings_html = '<a href="/#settings">Zulip settings page</a>'
45 subscriptions_html = '<a target="_blank" href="/#streams">streams page</a>'
46 else:
47 settings_html = 'Zulip settings page'
48 subscriptions_html = 'streams page'
49 context['settings_html'] = settings_html
50 context['subscriptions_html'] = subscriptions_html
51
52 class ApiURLView(TemplateView):
53 def get_context_data(self, **kwargs: Any) -> Dict[str, str]:
54 context = super().get_context_data(**kwargs)
55 add_api_uri_context(context, self.request)
56 return context
57
58 class APIView(ApiURLView):
59 template_name = 'zerver/api.html'
60
61
62 class MarkdownDirectoryView(ApiURLView):
63 path_template = ""
64
65 def get_path(self, article: str) -> Tuple[str, int]:
66 http_status = 200
67 if article == "":
68 article = "index"
69 elif article == "include/sidebar_index":
70 pass
71 elif "/" in article:
72 article = "missing"
73 http_status = 404
74 elif len(article) > 100 or not re.match('^[0-9a-zA-Z_-]+$', article):
75 article = "missing"
76 http_status = 404
77
78 path = self.path_template % (article,)
79 try:
80 loader.get_template(path)
81 return (path, http_status)
82 except loader.TemplateDoesNotExist:
83 return (self.path_template % ("missing",), 404)
84
85 def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:
86 article = kwargs["article"]
87 context = super().get_context_data() # type: Dict[str, Any]
88 (context["article"], http_status_ignored) = self.get_path(article)
89
90 # For disabling the "Back to home" on the homepage
91 context["not_index_page"] = not context["article"].endswith("/index.md")
92 if self.path_template == '/zerver/help/%s.md':
93 context["page_is_help_center"] = True
94 context["doc_root"] = "/help/"
95 (sidebar_index, http_status_ignored) = self.get_path("include/sidebar_index")
96 # We want the sliding/collapsing behavior for /help pages only
97 sidebar_class = "sidebar slide"
98 title_base = "Zulip Help Center"
99 else:
100 context["page_is_api_center"] = True
101 context["doc_root"] = "/api/"
102 (sidebar_index, http_status_ignored) = self.get_path("sidebar_index")
103 sidebar_class = "sidebar"
104 title_base = "Zulip API Documentation"
105
106 # The following is a somewhat hacky approach to extract titles from articles.
107 # Hack: `context["article"] has a leading `/`, so we use + to add directories.
108 article_path = os.path.join(settings.DEPLOY_ROOT, 'templates') + context["article"]
109 if os.path.exists(article_path):
110 with open(article_path) as article_file:
111 first_line = article_file.readlines()[0]
112 # Strip the header and then use the first line to get the article title
113 article_title = first_line.strip().lstrip("# ")
114 if context["not_index_page"]:
115 context["OPEN_GRAPH_TITLE"] = "%s (%s)" % (article_title, title_base)
116 else:
117 context["OPEN_GRAPH_TITLE"] = title_base
118 self.request.placeholder_open_graph_description = (
119 "REPLACMENT_OPEN_GRAPH_DESCRIPTION_%s" % (int(2**24 * random.random()),))
120 context["OPEN_GRAPH_DESCRIPTION"] = self.request.placeholder_open_graph_description
121
122 context["sidebar_index"] = sidebar_index
123 context["sidebar_class"] = sidebar_class
124 # An "article" might require the api_uri_context to be rendered
125 api_uri_context = {} # type: Dict[str, Any]
126 add_api_uri_context(api_uri_context, self.request)
127 api_uri_context["run_content_validators"] = True
128 context["api_uri_context"] = api_uri_context
129 return context
130
131 def get(self, request: HttpRequest, article: str="") -> HttpResponse:
132 (path, http_status) = self.get_path(article)
133 result = super().get(self, article=article)
134 if http_status != 200:
135 result.status_code = http_status
136 return result
137
138 def add_integrations_context(context: Dict[str, Any]) -> None:
139 alphabetical_sorted_categories = OrderedDict(sorted(CATEGORIES.items()))
140 alphabetical_sorted_integration = OrderedDict(sorted(INTEGRATIONS.items()))
141 enabled_integrations_count = len(list(filter(lambda v: v.is_enabled(), INTEGRATIONS.values())))
142 # Subtract 1 so saying "Over X integrations" is correct. Then,
143 # round down to the nearest multiple of 10.
144 integrations_count_display = ((enabled_integrations_count - 1) // 10) * 10
145 context['categories_dict'] = alphabetical_sorted_categories
146 context['integrations_dict'] = alphabetical_sorted_integration
147 context['integrations_count_display'] = integrations_count_display
148
149 def add_integrations_open_graph_context(context: Dict[str, Any], request: HttpRequest) -> None:
150 path_name = request.path.rstrip('/').split('/')[-1]
151 description = ('Zulip comes with over a hundred native integrations out of the box, '
152 'and integrates with Zapier, IFTTT, and Hubot to provide hundreds more. '
153 'Connect the apps you use everyday to Zulip.')
154
155 if path_name in INTEGRATIONS:
156 integration = INTEGRATIONS[path_name]
157 context['OPEN_GRAPH_TITLE'] = 'Connect {name} to Zulip'.format(name=integration.display_name)
158 context['OPEN_GRAPH_DESCRIPTION'] = description
159
160 elif path_name in CATEGORIES:
161 category = CATEGORIES[path_name]
162 context['OPEN_GRAPH_TITLE'] = 'Connect your {category} tools to Zulip'.format(category=category)
163 context['OPEN_GRAPH_DESCRIPTION'] = description
164
165 class IntegrationView(ApiURLView):
166 template_name = 'zerver/integrations/index.html'
167
168 def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:
169 context = super().get_context_data(**kwargs) # type: Dict[str, Any]
170 add_integrations_context(context)
171 add_integrations_open_graph_context(context, self.request)
172 return context
173
174
175 @has_request_variables
176 def integration_doc(request: HttpRequest, integration_name: str=REQ(default=None)) -> HttpResponse:
177 if not request.is_ajax():
178 return HttpResponseNotFound()
179 try:
180 integration = INTEGRATIONS[integration_name]
181 except KeyError:
182 return HttpResponseNotFound()
183
184 context = {} # type: Dict[str, Any]
185 add_api_uri_context(context, request)
186
187 context['integration_name'] = integration.name
188 context['integration_display_name'] = integration.display_name
189 if hasattr(integration, 'stream_name'):
190 context['recommended_stream_name'] = integration.stream_name
191 if isinstance(integration, WebhookIntegration):
192 context['integration_url'] = integration.url[3:]
193 if isinstance(integration, HubotIntegration):
194 context['hubot_docs_url'] = integration.hubot_docs_url
195 if isinstance(integration, EmailIntegration):
196 context['email_gateway_example'] = settings.EMAIL_GATEWAY_EXAMPLE
197
198 doc_html_str = render_markdown_path(integration.doc, context)
199
200 return HttpResponse(doc_html_str)
201
[end of zerver/views/documentation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zerver/views/documentation.py b/zerver/views/documentation.py
--- a/zerver/views/documentation.py
+++ b/zerver/views/documentation.py
@@ -162,6 +162,10 @@
context['OPEN_GRAPH_TITLE'] = 'Connect your {category} tools to Zulip'.format(category=category)
context['OPEN_GRAPH_DESCRIPTION'] = description
+ elif path_name == 'integrations':
+ context['OPEN_GRAPH_TITLE'] = 'Connect the tools you use to Zulip'
+ context['OPEN_GRAPH_DESCRIPTION'] = description
+
class IntegrationView(ApiURLView):
template_name = 'zerver/integrations/index.html'
| {"golden_diff": "diff --git a/zerver/views/documentation.py b/zerver/views/documentation.py\n--- a/zerver/views/documentation.py\n+++ b/zerver/views/documentation.py\n@@ -162,6 +162,10 @@\n context['OPEN_GRAPH_TITLE'] = 'Connect your {category} tools to Zulip'.format(category=category)\n context['OPEN_GRAPH_DESCRIPTION'] = description\n \n+ elif path_name == 'integrations':\n+ context['OPEN_GRAPH_TITLE'] = 'Connect the tools you use to Zulip'\n+ context['OPEN_GRAPH_DESCRIPTION'] = description\n+\n class IntegrationView(ApiURLView):\n template_name = 'zerver/integrations/index.html'\n", "issue": "portico/docs: Improve OpenAPI descriptions and titles.\nFollowups to https://github.com/zulip/zulip/pull/11045.\r\n- [ ] Currently we only use the first paragraph of text for the description. It would be better to concatenate the first few paragraphs, and truncate. Basically it's just using bs.find_all('p') and joining things together in a loop until it gets too long. \r\nOne catch is that is that we probably want to stop at the first non-paragraph block so we don't cross a list or something.\r\n- [ ] Currently, we've just tested our openAPI things with /api and /help. We should extend to the /integrations pages, and also figure out what we want to do for the top-10 major portico pages (especially /features, /why-zulip, and /for/open-source), which could benefit from a similar treatment.\r\n\r\nTo work on this I would start by skimming the code from #11045. \n", "before_files": [{"content": "from typing import Any, Dict, Tuple\nfrom collections import OrderedDict\nfrom django.views.generic import TemplateView\nfrom django.conf import settings\nfrom django.http import HttpRequest, HttpResponse, HttpResponseNotFound\nfrom django.template import loader\n\nimport os\nimport random\nimport re\n\nfrom zerver.lib.integrations import CATEGORIES, INTEGRATIONS, HubotIntegration, \\\n WebhookIntegration, EmailIntegration\nfrom zerver.lib.request import has_request_variables, REQ\nfrom zerver.lib.subdomains import get_subdomain\nfrom zerver.models import Realm\nfrom zerver.templatetags.app_filters import render_markdown_path\nfrom zerver.context_processors import zulip_default_context\n\ndef add_api_uri_context(context: Dict[str, Any], request: HttpRequest) -> None:\n context.update(zulip_default_context(request))\n\n subdomain = get_subdomain(request)\n if (subdomain != Realm.SUBDOMAIN_FOR_ROOT_DOMAIN\n or not settings.ROOT_DOMAIN_LANDING_PAGE):\n display_subdomain = subdomain\n html_settings_links = True\n else:\n display_subdomain = 'yourZulipDomain'\n html_settings_links = False\n\n display_host = Realm.host_for_subdomain(display_subdomain)\n api_url_scheme_relative = display_host + \"/api\"\n api_url = settings.EXTERNAL_URI_SCHEME + api_url_scheme_relative\n zulip_url = settings.EXTERNAL_URI_SCHEME + display_host\n\n context['external_uri_scheme'] = settings.EXTERNAL_URI_SCHEME\n context['api_url'] = api_url\n context['api_url_scheme_relative'] = api_url_scheme_relative\n context['zulip_url'] = zulip_url\n\n context[\"html_settings_links\"] = html_settings_links\n if html_settings_links:\n settings_html = '<a href=\"/#settings\">Zulip settings page</a>'\n subscriptions_html = '<a target=\"_blank\" href=\"/#streams\">streams page</a>'\n else:\n settings_html = 'Zulip settings page'\n subscriptions_html = 'streams page'\n context['settings_html'] = settings_html\n context['subscriptions_html'] = subscriptions_html\n\nclass ApiURLView(TemplateView):\n def get_context_data(self, **kwargs: Any) -> Dict[str, 
str]:\n context = super().get_context_data(**kwargs)\n add_api_uri_context(context, self.request)\n return context\n\nclass APIView(ApiURLView):\n template_name = 'zerver/api.html'\n\n\nclass MarkdownDirectoryView(ApiURLView):\n path_template = \"\"\n\n def get_path(self, article: str) -> Tuple[str, int]:\n http_status = 200\n if article == \"\":\n article = \"index\"\n elif article == \"include/sidebar_index\":\n pass\n elif \"/\" in article:\n article = \"missing\"\n http_status = 404\n elif len(article) > 100 or not re.match('^[0-9a-zA-Z_-]+$', article):\n article = \"missing\"\n http_status = 404\n\n path = self.path_template % (article,)\n try:\n loader.get_template(path)\n return (path, http_status)\n except loader.TemplateDoesNotExist:\n return (self.path_template % (\"missing\",), 404)\n\n def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:\n article = kwargs[\"article\"]\n context = super().get_context_data() # type: Dict[str, Any]\n (context[\"article\"], http_status_ignored) = self.get_path(article)\n\n # For disabling the \"Back to home\" on the homepage\n context[\"not_index_page\"] = not context[\"article\"].endswith(\"/index.md\")\n if self.path_template == '/zerver/help/%s.md':\n context[\"page_is_help_center\"] = True\n context[\"doc_root\"] = \"/help/\"\n (sidebar_index, http_status_ignored) = self.get_path(\"include/sidebar_index\")\n # We want the sliding/collapsing behavior for /help pages only\n sidebar_class = \"sidebar slide\"\n title_base = \"Zulip Help Center\"\n else:\n context[\"page_is_api_center\"] = True\n context[\"doc_root\"] = \"/api/\"\n (sidebar_index, http_status_ignored) = self.get_path(\"sidebar_index\")\n sidebar_class = \"sidebar\"\n title_base = \"Zulip API Documentation\"\n\n # The following is a somewhat hacky approach to extract titles from articles.\n # Hack: `context[\"article\"] has a leading `/`, so we use + to add directories.\n article_path = os.path.join(settings.DEPLOY_ROOT, 'templates') + context[\"article\"]\n if os.path.exists(article_path):\n with open(article_path) as article_file:\n first_line = article_file.readlines()[0]\n # Strip the header and then use the first line to get the article title\n article_title = first_line.strip().lstrip(\"# \")\n if context[\"not_index_page\"]:\n context[\"OPEN_GRAPH_TITLE\"] = \"%s (%s)\" % (article_title, title_base)\n else:\n context[\"OPEN_GRAPH_TITLE\"] = title_base\n self.request.placeholder_open_graph_description = (\n \"REPLACMENT_OPEN_GRAPH_DESCRIPTION_%s\" % (int(2**24 * random.random()),))\n context[\"OPEN_GRAPH_DESCRIPTION\"] = self.request.placeholder_open_graph_description\n\n context[\"sidebar_index\"] = sidebar_index\n context[\"sidebar_class\"] = sidebar_class\n # An \"article\" might require the api_uri_context to be rendered\n api_uri_context = {} # type: Dict[str, Any]\n add_api_uri_context(api_uri_context, self.request)\n api_uri_context[\"run_content_validators\"] = True\n context[\"api_uri_context\"] = api_uri_context\n return context\n\n def get(self, request: HttpRequest, article: str=\"\") -> HttpResponse:\n (path, http_status) = self.get_path(article)\n result = super().get(self, article=article)\n if http_status != 200:\n result.status_code = http_status\n return result\n\ndef add_integrations_context(context: Dict[str, Any]) -> None:\n alphabetical_sorted_categories = OrderedDict(sorted(CATEGORIES.items()))\n alphabetical_sorted_integration = OrderedDict(sorted(INTEGRATIONS.items()))\n enabled_integrations_count = len(list(filter(lambda v: v.is_enabled(), 
INTEGRATIONS.values())))\n # Subtract 1 so saying \"Over X integrations\" is correct. Then,\n # round down to the nearest multiple of 10.\n integrations_count_display = ((enabled_integrations_count - 1) // 10) * 10\n context['categories_dict'] = alphabetical_sorted_categories\n context['integrations_dict'] = alphabetical_sorted_integration\n context['integrations_count_display'] = integrations_count_display\n\ndef add_integrations_open_graph_context(context: Dict[str, Any], request: HttpRequest) -> None:\n path_name = request.path.rstrip('/').split('/')[-1]\n description = ('Zulip comes with over a hundred native integrations out of the box, '\n 'and integrates with Zapier, IFTTT, and Hubot to provide hundreds more. '\n 'Connect the apps you use everyday to Zulip.')\n\n if path_name in INTEGRATIONS:\n integration = INTEGRATIONS[path_name]\n context['OPEN_GRAPH_TITLE'] = 'Connect {name} to Zulip'.format(name=integration.display_name)\n context['OPEN_GRAPH_DESCRIPTION'] = description\n\n elif path_name in CATEGORIES:\n category = CATEGORIES[path_name]\n context['OPEN_GRAPH_TITLE'] = 'Connect your {category} tools to Zulip'.format(category=category)\n context['OPEN_GRAPH_DESCRIPTION'] = description\n\nclass IntegrationView(ApiURLView):\n template_name = 'zerver/integrations/index.html'\n\n def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:\n context = super().get_context_data(**kwargs) # type: Dict[str, Any]\n add_integrations_context(context)\n add_integrations_open_graph_context(context, self.request)\n return context\n\n\n@has_request_variables\ndef integration_doc(request: HttpRequest, integration_name: str=REQ(default=None)) -> HttpResponse:\n if not request.is_ajax():\n return HttpResponseNotFound()\n try:\n integration = INTEGRATIONS[integration_name]\n except KeyError:\n return HttpResponseNotFound()\n\n context = {} # type: Dict[str, Any]\n add_api_uri_context(context, request)\n\n context['integration_name'] = integration.name\n context['integration_display_name'] = integration.display_name\n if hasattr(integration, 'stream_name'):\n context['recommended_stream_name'] = integration.stream_name\n if isinstance(integration, WebhookIntegration):\n context['integration_url'] = integration.url[3:]\n if isinstance(integration, HubotIntegration):\n context['hubot_docs_url'] = integration.hubot_docs_url\n if isinstance(integration, EmailIntegration):\n context['email_gateway_example'] = settings.EMAIL_GATEWAY_EXAMPLE\n\n doc_html_str = render_markdown_path(integration.doc, context)\n\n return HttpResponse(doc_html_str)\n", "path": "zerver/views/documentation.py"}]} | 3,218 | 144 |
gh_patches_debug_5172 | rasdani/github-patches | git_diff | yt-project__yt-4776 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BUG: modifications through matplotlib engine cannot be properly displayed
### Bug report
**Bug summary**
Modifications made through the Matplotlib engine are not properly displayed.
Taking the following code as an example, the expected modifications only show up when saving through the containing matplotlib figure object directly, e.g. `fig.savefig("sloshing.png")`.
**Code for reproduction**
adapted from [docs](https://yt-project.org/docs/dev/cookbook/simple_plots.html#accessing-and-modifying-plots-directly) (also broken there)
```python
import numpy as np
import yt
# Load the dataset.
ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
# Create a slice object
slc = yt.SlicePlot(ds, "x", ("gas", "density"), width=(800.0, "kpc"))
# Get a reference to the matplotlib axes object for the plot
ax = slc.plots[("gas", "density")].axes
# Let's adjust the x axis tick labels
for label in ax.xaxis.get_ticklabels():
label.set_color("red")
label.set_fontsize(16)
# Get a reference to the matplotlib figure object for the plot
fig = slc.plots[("gas", "density")].figure
# And create a mini-panel of a gaussian histogram inside the plot
rect = (0.2, 0.2, 0.2, 0.2)
new_ax = fig.add_axes(rect)
n, bins, patches = new_ax.hist(
np.random.randn(1000) + 20, 50, facecolor="black", edgecolor="black"
)
# Make sure its visible
new_ax.tick_params(colors="white")
# And label it
la = new_ax.set_xlabel("Dinosaurs per furlong")
la.set_color("white")
slc.save()
```
**Actual outcome**

**Expected outcome**
The changes to the x-axis tick labels are visible, as below:

**Version Information**
* Operating System: MacOS 14.1.1 and Red Hat Enterprise Linux Server release 7.8 (Maipo)
* Python Version: 3.9
* yt version: 4.2.1 and 4.3.0
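One workaround that seems to help, sketched below under the assumption that forcing an explicit render before the matplotlib-level tweaks keeps `slc.save()` from discarding them; this is a sketch, not a verified fix:

```python
slc = yt.SlicePlot(ds, "x", ("gas", "density"), width=(800.0, "kpc"))
slc.render()  # build the matplotlib figure/axes before customizing them

ax = slc.plots[("gas", "density")].axes
for label in ax.xaxis.get_ticklabels():
    label.set_color("red")
    label.set_fontsize(16)

slc.save()
```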
</issue>
<code>
[start of doc/source/cookbook/simple_slice_matplotlib_example.py]
1 import numpy as np
2
3 import yt
4
5 # Load the dataset.
6 ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
7
8 # Create a slice object
9 slc = yt.SlicePlot(ds, "x", ("gas", "density"), width=(800.0, "kpc"))
10
11 # Get a reference to the matplotlib axes object for the plot
12 ax = slc.plots[("gas", "density")].axes
13
14 # Let's adjust the x axis tick labels
15 for label in ax.xaxis.get_ticklabels():
16 label.set_color("red")
17 label.set_fontsize(16)
18
19 # Get a reference to the matplotlib figure object for the plot
20 fig = slc.plots[("gas", "density")].figure
21
22 # And create a mini-panel of a gaussian histogram inside the plot
23 rect = (0.2, 0.2, 0.2, 0.2)
24 new_ax = fig.add_axes(rect)
25
26 n, bins, patches = new_ax.hist(
27 np.random.randn(1000) + 20, 50, facecolor="black", edgecolor="black"
28 )
29
30 # Make sure its visible
31 new_ax.tick_params(colors="white")
32
33 # And label it
34 la = new_ax.set_xlabel("Dinosaurs per furlong")
35 la.set_color("white")
36
37 slc.save()
38
[end of doc/source/cookbook/simple_slice_matplotlib_example.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/doc/source/cookbook/simple_slice_matplotlib_example.py b/doc/source/cookbook/simple_slice_matplotlib_example.py
--- a/doc/source/cookbook/simple_slice_matplotlib_example.py
+++ b/doc/source/cookbook/simple_slice_matplotlib_example.py
@@ -8,6 +8,10 @@
# Create a slice object
slc = yt.SlicePlot(ds, "x", ("gas", "density"), width=(800.0, "kpc"))
+# Rendering should be performed explicitly *before* any modification is
+# performed directly with matplotlib.
+slc.render()
+
# Get a reference to the matplotlib axes object for the plot
ax = slc.plots[("gas", "density")].axes
| {"golden_diff": "diff --git a/doc/source/cookbook/simple_slice_matplotlib_example.py b/doc/source/cookbook/simple_slice_matplotlib_example.py\n--- a/doc/source/cookbook/simple_slice_matplotlib_example.py\n+++ b/doc/source/cookbook/simple_slice_matplotlib_example.py\n@@ -8,6 +8,10 @@\n # Create a slice object\n slc = yt.SlicePlot(ds, \"x\", (\"gas\", \"density\"), width=(800.0, \"kpc\"))\n \n+# Rendering should be performed explicitly *before* any modification is\n+# performed directly with matplotlib.\n+slc.render()\n+\n # Get a reference to the matplotlib axes object for the plot\n ax = slc.plots[(\"gas\", \"density\")].axes\n", "issue": "BUG: modifications through matplotlib engine cannot be properly displayed\n<!--To help us understand and resolve your issue, please fill out the form to\r\nthe best of your ability.-->\r\n<!--You can feel free to delete the sections that do not apply.-->\r\n\r\n### Bug report\r\n\r\n**Bug summary**\r\n\r\nThe Modifications through Matplotlib engine cannot be properly displayed. \r\n\r\nTaking the following code for example, the expected modifications can only be shown by the containing matplotlib figure object like `fig.savefig(\"sloshing.png\")`. \r\n\r\n**Code for reproduction**\r\n\r\nadapted from [docs](https://yt-project.org/docs/dev/cookbook/simple_plots.html#accessing-and-modifying-plots-directly) (also broken there)\r\n\r\n```python\r\nimport numpy as np\r\n\r\nimport yt\r\n\r\n# Load the dataset.\r\nds = yt.load(\"GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150\")\r\n\r\n# Create a slice object\r\nslc = yt.SlicePlot(ds, \"x\", (\"gas\", \"density\"), width=(800.0, \"kpc\"))\r\n\r\n# Get a reference to the matplotlib axes object for the plot\r\nax = slc.plots[(\"gas\", \"density\")].axes\r\n\r\n# Let's adjust the x axis tick labels\r\nfor label in ax.xaxis.get_ticklabels():\r\n label.set_color(\"red\")\r\n label.set_fontsize(16)\r\n\r\n# Get a reference to the matplotlib figure object for the plot\r\nfig = slc.plots[(\"gas\", \"density\")].figure\r\n\r\n# And create a mini-panel of a gaussian histogram inside the plot\r\nrect = (0.2, 0.2, 0.2, 0.2)\r\nnew_ax = fig.add_axes(rect)\r\n\r\nn, bins, patches = new_ax.hist(\r\n np.random.randn(1000) + 20, 50, facecolor=\"black\", edgecolor=\"black\"\r\n)\r\n\r\n# Make sure its visible\r\nnew_ax.tick_params(colors=\"white\")\r\n\r\n# And label it\r\nla = new_ax.set_xlabel(\"Dinosaurs per furlong\")\r\nla.set_color(\"white\")\r\n\r\nslc.save()\r\n```\r\n\r\n**Actual outcome**\r\n\r\n\r\n\r\n**Expected outcome**\r\n\r\nThe changes of the x-axis tick labels\r\n\r\n\r\n**Version Information**\r\n * Operating System: MacOS 14.1.1 and Red Hat Enterprise Linux Server release 7.8 (Maipo)\r\n * Python Version: 3.9\r\n * yt version: 4.2.1 and 4.3.0\r\n\r\n<!--Please tell us how you installed yt and python e.g., from source,\r\npip, conda. 
If you installed from conda, please specify which channel you used\r\nif not the default-->\r\n\n", "before_files": [{"content": "import numpy as np\n\nimport yt\n\n# Load the dataset.\nds = yt.load(\"GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150\")\n\n# Create a slice object\nslc = yt.SlicePlot(ds, \"x\", (\"gas\", \"density\"), width=(800.0, \"kpc\"))\n\n# Get a reference to the matplotlib axes object for the plot\nax = slc.plots[(\"gas\", \"density\")].axes\n\n# Let's adjust the x axis tick labels\nfor label in ax.xaxis.get_ticklabels():\n label.set_color(\"red\")\n label.set_fontsize(16)\n\n# Get a reference to the matplotlib figure object for the plot\nfig = slc.plots[(\"gas\", \"density\")].figure\n\n# And create a mini-panel of a gaussian histogram inside the plot\nrect = (0.2, 0.2, 0.2, 0.2)\nnew_ax = fig.add_axes(rect)\n\nn, bins, patches = new_ax.hist(\n np.random.randn(1000) + 20, 50, facecolor=\"black\", edgecolor=\"black\"\n)\n\n# Make sure its visible\nnew_ax.tick_params(colors=\"white\")\n\n# And label it\nla = new_ax.set_xlabel(\"Dinosaurs per furlong\")\nla.set_color(\"white\")\n\nslc.save()\n", "path": "doc/source/cookbook/simple_slice_matplotlib_example.py"}]} | 1,642 | 157 |
gh_patches_debug_32427 | rasdani/github-patches | git_diff | HypothesisWorks__hypothesis-2356 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pytest function scoped fixtures should run once per example, not once per test
Because of the way Hypothesis exposes just a single test function, pytest runs its fixtures only once for that function rather than once per generated example.
This is a long-running problem with Hypothesis's interaction with pytest, and there is [an open bug about it on the pytest side too](https://github.com/pytest-dev/pytest/issues/916).
Fixing this is tricky. The [fixtures-integration](https://github.com/HypothesisWorks/hypothesis-python/tree/fixtures-integration) branch has a partial prototype which makes _some_ progress towards making it work, but it's very far from complete and has a number of issues which are likely to need substantial work to fix.
I [am currently seeking funding to work on this ticket](http://hypothesis.works/articles/pytest-integration-sponsorship/) and it is unlikely to make much progress without said funding.
</issue>
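For concreteness, a minimal sketch of the behaviour described above, assuming only pytest and Hypothesis are installed: the function-scoped fixture is resolved once per test invocation, so its state leaks across every example that `@given` generates inside that invocation.

```python
import pytest
from hypothesis import given, strategies as st


@pytest.fixture
def bucket():
    return []  # function-scoped: built once per test, not once per example


@given(x=st.integers())
def test_fixture_is_not_reset(bucket, x):
    bucket.append(x)
    # Fails as soon as Hypothesis runs a second example, because the very
    # same list object is handed to every example of this test.
    assert len(bucket) == 1
```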
<code>
[start of hypothesis-python/src/hypothesis/extra/pytestplugin.py]
1 # This file is part of Hypothesis, which may be found at
2 # https://github.com/HypothesisWorks/hypothesis/
3 #
4 # Most of this work is copyright (C) 2013-2020 David R. MacIver
5 # ([email protected]), but it contains contributions by others. See
6 # CONTRIBUTING.rst for a full list of people who may hold copyright, and
7 # consult the git log if you need to determine who owns an individual
8 # contribution.
9 #
10 # This Source Code Form is subject to the terms of the Mozilla Public License,
11 # v. 2.0. If a copy of the MPL was not distributed with this file, You can
12 # obtain one at https://mozilla.org/MPL/2.0/.
13 #
14 # END HEADER
15
16 from distutils.version import LooseVersion
17
18 import pytest
19
20 from hypothesis import Verbosity, core, settings
21 from hypothesis.errors import InvalidArgument
22 from hypothesis.internal.detection import is_hypothesis_test
23 from hypothesis.reporting import default as default_reporter, with_reporter
24 from hypothesis.statistics import collector
25
26 LOAD_PROFILE_OPTION = "--hypothesis-profile"
27 VERBOSITY_OPTION = "--hypothesis-verbosity"
28 PRINT_STATISTICS_OPTION = "--hypothesis-show-statistics"
29 SEED_OPTION = "--hypothesis-seed"
30
31
32 class StoringReporter:
33 def __init__(self, config):
34 self.config = config
35 self.results = []
36
37 def __call__(self, msg):
38 if self.config.getoption("capture", "fd") == "no":
39 default_reporter(msg)
40 if not isinstance(msg, str):
41 msg = repr(msg)
42 self.results.append(msg)
43
44
45 if LooseVersion(pytest.__version__) < "4.3": # pragma: no cover
46 import warnings
47 from hypothesis.errors import HypothesisWarning
48
49 PYTEST_TOO_OLD_MESSAGE = """
50 You are using Pytest version %s. Hypothesis tests work with any test
51 runner, but our Pytest plugin requires Pytest 4.3 or newer.
52 Note that the Pytest developers no longer support this version either!
53 Disabling the Hypothesis pytest plugin...
54 """
55 warnings.warn(PYTEST_TOO_OLD_MESSAGE % (pytest.__version__,), HypothesisWarning)
56
57 else:
58
59 def pytest_addoption(parser):
60 group = parser.getgroup("hypothesis", "Hypothesis")
61 group.addoption(
62 LOAD_PROFILE_OPTION,
63 action="store",
64 help="Load in a registered hypothesis.settings profile",
65 )
66 group.addoption(
67 VERBOSITY_OPTION,
68 action="store",
69 choices=[opt.name for opt in Verbosity],
70 help="Override profile with verbosity setting specified",
71 )
72 group.addoption(
73 PRINT_STATISTICS_OPTION,
74 action="store_true",
75 help="Configure when statistics are printed",
76 default=False,
77 )
78 group.addoption(
79 SEED_OPTION,
80 action="store",
81 help="Set a seed to use for all Hypothesis tests",
82 )
83
84 def pytest_report_header(config):
85 profile = config.getoption(LOAD_PROFILE_OPTION)
86 if not profile:
87 profile = settings._current_profile
88 settings_str = settings.get_profile(profile).show_changed()
89 if settings_str != "":
90 settings_str = " -> %s" % (settings_str)
91 if (
92 config.option.verbose >= 1
93 or settings.default.verbosity >= Verbosity.verbose
94 ):
95 return "hypothesis profile %r%s" % (profile, settings_str)
96
97 def pytest_configure(config):
98 core.running_under_pytest = True
99 profile = config.getoption(LOAD_PROFILE_OPTION)
100 if profile:
101 settings.load_profile(profile)
102 verbosity_name = config.getoption(VERBOSITY_OPTION)
103 if verbosity_name:
104 verbosity_value = Verbosity[verbosity_name]
105 profile_name = "%s-with-%s-verbosity" % (
106 settings._current_profile,
107 verbosity_name,
108 )
109 # register_profile creates a new profile, exactly like the current one,
110 # with the extra values given (in this case 'verbosity')
111 settings.register_profile(profile_name, verbosity=verbosity_value)
112 settings.load_profile(profile_name)
113 seed = config.getoption(SEED_OPTION)
114 if seed is not None:
115 try:
116 seed = int(seed)
117 except ValueError:
118 pass
119 core.global_force_seed = seed
120 config.addinivalue_line("markers", "hypothesis: Tests which use hypothesis.")
121
122 @pytest.hookimpl(hookwrapper=True)
123 def pytest_runtest_call(item):
124 if not hasattr(item, "obj"):
125 yield
126 elif not is_hypothesis_test(item.obj):
127 # If @given was not applied, check whether other hypothesis
128 # decorators were applied, and raise an error if they were.
129 if getattr(item.obj, "is_hypothesis_strategy_function", False):
130 raise InvalidArgument(
131 "%s is a function that returns a Hypothesis strategy, but pytest "
132 "has collected it as a test function. This is useless as the "
133 "function body will never be executed. To define a test "
134 "function, use @given instead of @composite." % (item.nodeid,)
135 )
136 message = "Using `@%s` on a test without `@given` is completely pointless."
137 for name, attribute in [
138 ("example", "hypothesis_explicit_examples"),
139 ("seed", "_hypothesis_internal_use_seed"),
140 ("settings", "_hypothesis_internal_settings_applied"),
141 ("reproduce_example", "_hypothesis_internal_use_reproduce_failure"),
142 ]:
143 if hasattr(item.obj, attribute):
144 raise InvalidArgument(message % (name,))
145 yield
146 else:
147 if item.get_closest_marker("parametrize") is not None:
148 # Give every parametrized test invocation a unique database key
149 key = item.nodeid.encode("utf-8")
150 item.obj.hypothesis.inner_test._hypothesis_internal_add_digest = key
151
152 store = StoringReporter(item.config)
153
154 def note_statistics(stats):
155 lines = [item.nodeid + ":", ""] + stats.get_description() + [""]
156 item.hypothesis_statistics = lines
157
158 with collector.with_value(note_statistics):
159 with with_reporter(store):
160 yield
161 if store.results:
162 item.hypothesis_report_information = list(store.results)
163
164 @pytest.hookimpl(hookwrapper=True)
165 def pytest_runtest_makereport(item, call):
166 report = (yield).get_result()
167 if hasattr(item, "hypothesis_report_information"):
168 report.sections.append(
169 ("Hypothesis", "\n".join(item.hypothesis_report_information))
170 )
171 if hasattr(item, "hypothesis_statistics") and report.when == "teardown":
172 val = ("hypothesis-stats", item.hypothesis_statistics)
173 report.user_properties.append(val)
174
175 def pytest_terminal_summary(terminalreporter):
176 if not terminalreporter.config.getoption(PRINT_STATISTICS_OPTION):
177 return
178 terminalreporter.section("Hypothesis Statistics")
179 # terminalreporter.stats is a dict, where the empty string appears to
180 # always be the key for a list of _pytest.reports.TestReport objects
181 # (where we stored the statistics data in pytest_runtest_makereport above)
182 for test_report in terminalreporter.stats.get("", []):
183 for name, lines in test_report.user_properties:
184 if name == "hypothesis-stats" and test_report.when == "teardown":
185 for li in lines:
186 terminalreporter.write_line(li)
187
188 def pytest_collection_modifyitems(items):
189 for item in items:
190 if isinstance(item, pytest.Function) and is_hypothesis_test(item.obj):
191 item.add_marker("hypothesis")
192
193
194 def load():
195 """Required for `pluggy` to load a plugin from setuptools entrypoints."""
196
[end of hypothesis-python/src/hypothesis/extra/pytestplugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hypothesis-python/src/hypothesis/extra/pytestplugin.py b/hypothesis-python/src/hypothesis/extra/pytestplugin.py
--- a/hypothesis-python/src/hypothesis/extra/pytestplugin.py
+++ b/hypothesis-python/src/hypothesis/extra/pytestplugin.py
@@ -14,10 +14,12 @@
# END HEADER
from distutils.version import LooseVersion
+from inspect import signature
import pytest
from hypothesis import Verbosity, core, settings
+from hypothesis._settings import note_deprecation
from hypothesis.errors import InvalidArgument
from hypothesis.internal.detection import is_hypothesis_test
from hypothesis.reporting import default as default_reporter, with_reporter
@@ -144,6 +146,25 @@
raise InvalidArgument(message % (name,))
yield
else:
+ # Warn about function-scoped fixtures, excluding autouse fixtures because
+ # the advice is probably not actionable and the status quo seems OK...
+ # See https://github.com/HypothesisWorks/hypothesis/issues/377 for detail.
+ argnames = None
+ for fx_defs in item._request._fixturemanager.getfixtureinfo(
+ node=item, func=item.function, cls=None
+ ).name2fixturedefs.values():
+ if argnames is None:
+ argnames = frozenset(signature(item.function).parameters)
+ for fx in fx_defs:
+ if fx.scope == "function" and fx.argname in argnames:
+ note_deprecation(
+ "%s uses the %r fixture, but function-scoped fixtures "
+ "should not be used with @given(...) tests, because "
+ "fixtures are not reset between generated examples!"
+ % (item.nodeid, fx.argname),
+ since="RELEASEDAY",
+ )
+
if item.get_closest_marker("parametrize") is not None:
# Give every parametrized test invocation a unique database key
key = item.nodeid.encode("utf-8")
| {"golden_diff": "diff --git a/hypothesis-python/src/hypothesis/extra/pytestplugin.py b/hypothesis-python/src/hypothesis/extra/pytestplugin.py\n--- a/hypothesis-python/src/hypothesis/extra/pytestplugin.py\n+++ b/hypothesis-python/src/hypothesis/extra/pytestplugin.py\n@@ -14,10 +14,12 @@\n # END HEADER\n \n from distutils.version import LooseVersion\n+from inspect import signature\n \n import pytest\n \n from hypothesis import Verbosity, core, settings\n+from hypothesis._settings import note_deprecation\n from hypothesis.errors import InvalidArgument\n from hypothesis.internal.detection import is_hypothesis_test\n from hypothesis.reporting import default as default_reporter, with_reporter\n@@ -144,6 +146,25 @@\n raise InvalidArgument(message % (name,))\n yield\n else:\n+ # Warn about function-scoped fixtures, excluding autouse fixtures because\n+ # the advice is probably not actionable and the status quo seems OK...\n+ # See https://github.com/HypothesisWorks/hypothesis/issues/377 for detail.\n+ argnames = None\n+ for fx_defs in item._request._fixturemanager.getfixtureinfo(\n+ node=item, func=item.function, cls=None\n+ ).name2fixturedefs.values():\n+ if argnames is None:\n+ argnames = frozenset(signature(item.function).parameters)\n+ for fx in fx_defs:\n+ if fx.scope == \"function\" and fx.argname in argnames:\n+ note_deprecation(\n+ \"%s uses the %r fixture, but function-scoped fixtures \"\n+ \"should not be used with @given(...) tests, because \"\n+ \"fixtures are not reset between generated examples!\"\n+ % (item.nodeid, fx.argname),\n+ since=\"RELEASEDAY\",\n+ )\n+\n if item.get_closest_marker(\"parametrize\") is not None:\n # Give every parametrized test invocation a unique database key\n key = item.nodeid.encode(\"utf-8\")\n", "issue": "pytest function scoped fixtures should run once per example, not once per test\nBecause of the way Hypothesis just exposes a single test function, pytest just runs its fixtures once for that function.\n\nThis is a long running problem with Hypothesis's interaction with pytest and there is [an open bug about it on the pytest side too](https://github.com/pytest-dev/pytest/issues/916).\n\nFixing this is tricky. The [fixtures-integration](https://github.com/HypothesisWorks/hypothesis-python/tree/fixtures-integration) branch has a partial prototype which makes _some_ progress towards making it work, but it's very far from complete and has a number of issues which are likely to need substantial work to fix.\n\nI [am currently seeking funding to work on this ticket](http://hypothesis.works/articles/pytest-integration-sponsorship/) and it is unlikely to make much progress without said funding.\n\n", "before_files": [{"content": "# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis/\n#\n# Most of this work is copyright (C) 2013-2020 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. 
If a copy of the MPL was not distributed with this file, You can\n# obtain one at https://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom distutils.version import LooseVersion\n\nimport pytest\n\nfrom hypothesis import Verbosity, core, settings\nfrom hypothesis.errors import InvalidArgument\nfrom hypothesis.internal.detection import is_hypothesis_test\nfrom hypothesis.reporting import default as default_reporter, with_reporter\nfrom hypothesis.statistics import collector\n\nLOAD_PROFILE_OPTION = \"--hypothesis-profile\"\nVERBOSITY_OPTION = \"--hypothesis-verbosity\"\nPRINT_STATISTICS_OPTION = \"--hypothesis-show-statistics\"\nSEED_OPTION = \"--hypothesis-seed\"\n\n\nclass StoringReporter:\n def __init__(self, config):\n self.config = config\n self.results = []\n\n def __call__(self, msg):\n if self.config.getoption(\"capture\", \"fd\") == \"no\":\n default_reporter(msg)\n if not isinstance(msg, str):\n msg = repr(msg)\n self.results.append(msg)\n\n\nif LooseVersion(pytest.__version__) < \"4.3\": # pragma: no cover\n import warnings\n from hypothesis.errors import HypothesisWarning\n\n PYTEST_TOO_OLD_MESSAGE = \"\"\"\n You are using Pytest version %s. Hypothesis tests work with any test\n runner, but our Pytest plugin requires Pytest 4.3 or newer.\n Note that the Pytest developers no longer support this version either!\n Disabling the Hypothesis pytest plugin...\n \"\"\"\n warnings.warn(PYTEST_TOO_OLD_MESSAGE % (pytest.__version__,), HypothesisWarning)\n\nelse:\n\n def pytest_addoption(parser):\n group = parser.getgroup(\"hypothesis\", \"Hypothesis\")\n group.addoption(\n LOAD_PROFILE_OPTION,\n action=\"store\",\n help=\"Load in a registered hypothesis.settings profile\",\n )\n group.addoption(\n VERBOSITY_OPTION,\n action=\"store\",\n choices=[opt.name for opt in Verbosity],\n help=\"Override profile with verbosity setting specified\",\n )\n group.addoption(\n PRINT_STATISTICS_OPTION,\n action=\"store_true\",\n help=\"Configure when statistics are printed\",\n default=False,\n )\n group.addoption(\n SEED_OPTION,\n action=\"store\",\n help=\"Set a seed to use for all Hypothesis tests\",\n )\n\n def pytest_report_header(config):\n profile = config.getoption(LOAD_PROFILE_OPTION)\n if not profile:\n profile = settings._current_profile\n settings_str = settings.get_profile(profile).show_changed()\n if settings_str != \"\":\n settings_str = \" -> %s\" % (settings_str)\n if (\n config.option.verbose >= 1\n or settings.default.verbosity >= Verbosity.verbose\n ):\n return \"hypothesis profile %r%s\" % (profile, settings_str)\n\n def pytest_configure(config):\n core.running_under_pytest = True\n profile = config.getoption(LOAD_PROFILE_OPTION)\n if profile:\n settings.load_profile(profile)\n verbosity_name = config.getoption(VERBOSITY_OPTION)\n if verbosity_name:\n verbosity_value = Verbosity[verbosity_name]\n profile_name = \"%s-with-%s-verbosity\" % (\n settings._current_profile,\n verbosity_name,\n )\n # register_profile creates a new profile, exactly like the current one,\n # with the extra values given (in this case 'verbosity')\n settings.register_profile(profile_name, verbosity=verbosity_value)\n settings.load_profile(profile_name)\n seed = config.getoption(SEED_OPTION)\n if seed is not None:\n try:\n seed = int(seed)\n except ValueError:\n pass\n core.global_force_seed = seed\n config.addinivalue_line(\"markers\", \"hypothesis: Tests which use hypothesis.\")\n\n @pytest.hookimpl(hookwrapper=True)\n def pytest_runtest_call(item):\n if not hasattr(item, \"obj\"):\n yield\n elif not 
is_hypothesis_test(item.obj):\n # If @given was not applied, check whether other hypothesis\n # decorators were applied, and raise an error if they were.\n if getattr(item.obj, \"is_hypothesis_strategy_function\", False):\n raise InvalidArgument(\n \"%s is a function that returns a Hypothesis strategy, but pytest \"\n \"has collected it as a test function. This is useless as the \"\n \"function body will never be executed. To define a test \"\n \"function, use @given instead of @composite.\" % (item.nodeid,)\n )\n message = \"Using `@%s` on a test without `@given` is completely pointless.\"\n for name, attribute in [\n (\"example\", \"hypothesis_explicit_examples\"),\n (\"seed\", \"_hypothesis_internal_use_seed\"),\n (\"settings\", \"_hypothesis_internal_settings_applied\"),\n (\"reproduce_example\", \"_hypothesis_internal_use_reproduce_failure\"),\n ]:\n if hasattr(item.obj, attribute):\n raise InvalidArgument(message % (name,))\n yield\n else:\n if item.get_closest_marker(\"parametrize\") is not None:\n # Give every parametrized test invocation a unique database key\n key = item.nodeid.encode(\"utf-8\")\n item.obj.hypothesis.inner_test._hypothesis_internal_add_digest = key\n\n store = StoringReporter(item.config)\n\n def note_statistics(stats):\n lines = [item.nodeid + \":\", \"\"] + stats.get_description() + [\"\"]\n item.hypothesis_statistics = lines\n\n with collector.with_value(note_statistics):\n with with_reporter(store):\n yield\n if store.results:\n item.hypothesis_report_information = list(store.results)\n\n @pytest.hookimpl(hookwrapper=True)\n def pytest_runtest_makereport(item, call):\n report = (yield).get_result()\n if hasattr(item, \"hypothesis_report_information\"):\n report.sections.append(\n (\"Hypothesis\", \"\\n\".join(item.hypothesis_report_information))\n )\n if hasattr(item, \"hypothesis_statistics\") and report.when == \"teardown\":\n val = (\"hypothesis-stats\", item.hypothesis_statistics)\n report.user_properties.append(val)\n\n def pytest_terminal_summary(terminalreporter):\n if not terminalreporter.config.getoption(PRINT_STATISTICS_OPTION):\n return\n terminalreporter.section(\"Hypothesis Statistics\")\n # terminalreporter.stats is a dict, where the empty string appears to\n # always be the key for a list of _pytest.reports.TestReport objects\n # (where we stored the statistics data in pytest_runtest_makereport above)\n for test_report in terminalreporter.stats.get(\"\", []):\n for name, lines in test_report.user_properties:\n if name == \"hypothesis-stats\" and test_report.when == \"teardown\":\n for li in lines:\n terminalreporter.write_line(li)\n\n def pytest_collection_modifyitems(items):\n for item in items:\n if isinstance(item, pytest.Function) and is_hypothesis_test(item.obj):\n item.add_marker(\"hypothesis\")\n\n\ndef load():\n \"\"\"Required for `pluggy` to load a plugin from setuptools entrypoints.\"\"\"\n", "path": "hypothesis-python/src/hypothesis/extra/pytestplugin.py"}]} | 2,916 | 444 |
gh_patches_debug_30981 | rasdani/github-patches | git_diff | mosaicml__composer-298 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TQDM spams lines during validation when running trainer.fit() in notebook
** Environment **
Colab notebook.
<img width="607" alt="TQDM Spam" src="https://user-images.githubusercontent.com/11728034/151625792-31dc2c0e-d572-43d1-9514-5127a6a3751b.png">
** To reproduce
run `trainer.fit()` in a Colab notebook. See [this notebook](https://colab.research.google.com/drive/12Dl0NVDaj4tf4gfpfg-rkIAoO_H7edo3?usp=sharing) for an example.
## Expected behavior
TQDM output should remain on a single line during evaluation, which happens during training.
</issue>
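A common remedy, and the direction the reference patch stored with this entry takes, is to create the bar through `tqdm.auto`, which returns the widget-based bar inside notebooks and the plain terminal bar elsewhere, so refreshes update a single line in both cases. A rough sketch, with the total and metric value borrowed from the logger's docstring example:

```python
from tqdm import auto

# auto.tqdm picks the notebook front-end when running under Jupyter/Colab
# and falls back to the ordinary terminal bar otherwise.
pbar = auto.tqdm(total=20, desc="Epoch 1 (val)",
                 bar_format="{l_bar}{bar:10}{r_bar}{bar:-10b}")
for _ in range(20):
    pbar.update()
    pbar.set_postfix({"accuracy/val": 0.0995})
pbar.close()
```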
<code>
[start of composer/loggers/tqdm_logger.py]
1 # Copyright 2021 MosaicML. All Rights Reserved.
2
3 from __future__ import annotations
4
5 import sys
6 from dataclasses import asdict, dataclass
7 from typing import TYPE_CHECKING, Any, Dict, List, Optional
8
9 import tqdm
10 import yaml
11
12 from composer.core.logging import LogLevel, TLogData, TLogDataValue, format_log_data_value
13 from composer.core.logging.base_backend import BaseLoggerBackend
14 from composer.core.state import State
15 from composer.core.types import StateDict
16 from composer.utils import dist
17
18 if TYPE_CHECKING:
19 from composer.core.logging import Logger
20
21 _IS_TRAIN_TO_KEYS_TO_LOG = {True: ['loss/train'], False: ['accuracy/val']}
22
23
24 @dataclass
25 class _TQDMLoggerInstanceState:
26 total: Optional[int]
27 description: str
28 position: int
29 keys_to_log: List[str]
30 n: int
31 epoch_metrics: Dict[str, TLogDataValue]
32
33
34 class _TQDMLoggerInstance:
35
36 def __init__(self, state: _TQDMLoggerInstanceState) -> None:
37 self.state = state
38 self.pbar = tqdm.tqdm(total=state.total,
39 desc=state.description,
40 position=state.position,
41 bar_format="{l_bar}{bar:10}{r_bar}{bar:-10b}")
42 self.pbar.set_postfix(state.epoch_metrics)
43
44 def log_metric(self, data: TLogData):
45 formatted_data = {k: format_log_data_value(v) for (k, v) in data.items() if k in self.state.keys_to_log}
46 self.state.epoch_metrics.update(formatted_data)
47 self.pbar.set_postfix(self.state.epoch_metrics)
48
49 def update(self):
50 self.pbar.update()
51 self.state.n = self.pbar.n
52
53 def close(self):
54 self.pbar.close()
55
56 def state_dict(self) -> StateDict:
57 return asdict(self.state)
58
59
60 class TQDMLoggerBackend(BaseLoggerBackend):
61 """Shows TQDM progress bars.
62
63 During training, the progress bar logs the batch and training loss.
64 During validation, the progress bar logs the batch and validation accuracy.
65
66 Example output::
67
68 Epoch 1: 100%|██████████| 64/64 [00:01<00:00, 53.17it/s, loss/train=2.3023]
69 Epoch 1 (val): 100%|██████████| 20/20 [00:00<00:00, 100.96it/s, accuracy/val=0.0995]
70
71 .. note::
72
73 It is currently not possible to show additional metrics.
74 Custom metrics for the TQDM progress bar will be supported in a future version.
75
76 Args:
77 config (dict or None, optional):
78 Trainer configuration. If provided, it is printed to the terminal as YAML.
79 """
80
81 def __init__(self, config: Optional[Dict[str, Any]] = None) -> None:
82 super().__init__()
83 self.pbars: Dict[bool, _TQDMLoggerInstance] = {}
84 self.is_train: Optional[bool] = None
85 self.config = config
86
87 def will_log(self, state: State, log_level: LogLevel) -> bool:
88 del state # Unused
89 return dist.get_global_rank() == 0 and log_level <= LogLevel.BATCH
90
91 def log_metric(self, epoch: int, step: int, log_level: LogLevel, data: TLogData) -> None:
92 del epoch, step, log_level # Unused
93 if self.is_train in self.pbars:
94 # Logging outside an epoch
95 assert self.is_train is not None
96 self.pbars[self.is_train].log_metric(data)
97
98 def init(self, state: State, logger: Logger) -> None:
99 del state, logger # unused
100 if self.config is not None:
101 print("Config")
102 print("-" * 30)
103 yaml.safe_dump(self.config, stream=sys.stdout)
104 print("-" * 30)
105 print()
106
107 def _start(self, state: State):
108 if dist.get_global_rank() != 0:
109 return
110 assert self.is_train is not None, "self.is_train should be set by the callback"
111 # TODO(anis) -- in #120, len(state.eval_dataloader) is inaccurate, as it does not incorporate
112 # trainer._eval_subset_num_batches. The evaluator spec should fix this.
113 total_steps = state.steps_per_epoch if self.is_train else len(state.eval_dataloader)
114 desc = f'Epoch {int(state.timer.epoch)}'
115 position = 0 if self.is_train else 1
116 if not self.is_train:
117 desc += f", Batch {int(state.timer.batch)} (val)"
118 self.pbars[self.is_train] = _TQDMLoggerInstance(
119 _TQDMLoggerInstanceState(total=total_steps,
120 position=position,
121 n=0,
122 keys_to_log=_IS_TRAIN_TO_KEYS_TO_LOG[self.is_train],
123 description=desc,
124 epoch_metrics={}))
125
126 def epoch_start(self, state: State, logger: Logger) -> None:
127 del logger # unused
128 if dist.get_global_rank() != 0:
129 return
130 self.is_train = True
131 self._start(state)
132
133 def eval_start(self, state: State, logger: Logger) -> None:
134 del logger # unused
135 if dist.get_global_rank() != 0:
136 return
137 self.is_train = False
138 self._start(state)
139
140 def _update(self):
141 if dist.get_global_rank() != 0:
142 return
143 if self.is_train in self.pbars:
144 assert self.is_train is not None
145 self.pbars[self.is_train].update()
146
147 def batch_end(self, state: State, logger: Logger) -> None:
148 del state, logger # unused
149 if dist.get_global_rank() != 0:
150 return
151 self._update()
152
153 def eval_after_forward(self, state: State, logger: Logger) -> None:
154 del state, logger # unused
155 if dist.get_global_rank() != 0:
156 return
157 self._update()
158
159 def _end(self):
160 if dist.get_global_rank() != 0:
161 return
162 if self.is_train in self.pbars:
163 assert self.is_train is not None
164 self.pbars[self.is_train].close()
165 del self.pbars[self.is_train]
166 self.is_train = None
167
168 def epoch_end(self, state: State, logger: Logger) -> None:
169 del state, logger # unused
170 if dist.get_global_rank() != 0:
171 return
172 self._end()
173
174 def eval_end(self, state: State, logger: Logger) -> None:
175 del state, logger # unused
176 if dist.get_global_rank() != 0:
177 return
178 self._end()
179
180 def state_dict(self) -> StateDict:
181 return {
182 "pbars": {k: v.state_dict() for (k, v) in self.pbars.items()},
183 "is_train": self.is_train,
184 }
185
186 def load_state_dict(self, state: StateDict) -> None:
187 self.pbars = {k: _TQDMLoggerInstance(**v) for (k, v) in state["pbars"].items()}
188 self.is_train = state["is_train"]
189
[end of composer/loggers/tqdm_logger.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/composer/loggers/tqdm_logger.py b/composer/loggers/tqdm_logger.py
--- a/composer/loggers/tqdm_logger.py
+++ b/composer/loggers/tqdm_logger.py
@@ -6,8 +6,8 @@
from dataclasses import asdict, dataclass
from typing import TYPE_CHECKING, Any, Dict, List, Optional
-import tqdm
import yaml
+from tqdm import auto
from composer.core.logging import LogLevel, TLogData, TLogDataValue, format_log_data_value
from composer.core.logging.base_backend import BaseLoggerBackend
@@ -35,7 +35,7 @@
def __init__(self, state: _TQDMLoggerInstanceState) -> None:
self.state = state
- self.pbar = tqdm.tqdm(total=state.total,
+ self.pbar = auto.tqdm(total=state.total,
desc=state.description,
position=state.position,
bar_format="{l_bar}{bar:10}{r_bar}{bar:-10b}")
@@ -65,12 +65,12 @@
Example output::
- Epoch 1: 100%|██████████| 64/64 [00:01<00:00, 53.17it/s, loss/train=2.3023]
- Epoch 1 (val): 100%|██████████| 20/20 [00:00<00:00, 100.96it/s, accuracy/val=0.0995]
+ Epoch 1: 100%|██████████| 64/64 [00:01<00:00, 53.17it/s, loss/train=2.3023]
+ Epoch 1 (val): 100%|██████████| 20/20 [00:00<00:00, 100.96it/s, accuracy/val=0.0995]
.. note::
- It is currently not possible to show additional metrics.
+ It is currently not possible to show additional metrics.
Custom metrics for the TQDM progress bar will be supported in a future version.
Args:
| {"golden_diff": "diff --git a/composer/loggers/tqdm_logger.py b/composer/loggers/tqdm_logger.py\n--- a/composer/loggers/tqdm_logger.py\n+++ b/composer/loggers/tqdm_logger.py\n@@ -6,8 +6,8 @@\n from dataclasses import asdict, dataclass\n from typing import TYPE_CHECKING, Any, Dict, List, Optional\n \n-import tqdm\n import yaml\n+from tqdm import auto\n \n from composer.core.logging import LogLevel, TLogData, TLogDataValue, format_log_data_value\n from composer.core.logging.base_backend import BaseLoggerBackend\n@@ -35,7 +35,7 @@\n \n def __init__(self, state: _TQDMLoggerInstanceState) -> None:\n self.state = state\n- self.pbar = tqdm.tqdm(total=state.total,\n+ self.pbar = auto.tqdm(total=state.total,\n desc=state.description,\n position=state.position,\n bar_format=\"{l_bar}{bar:10}{r_bar}{bar:-10b}\")\n@@ -65,12 +65,12 @@\n \n Example output::\n \n- Epoch 1: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 64/64 [00:01<00:00, 53.17it/s, loss/train=2.3023] \n- Epoch 1 (val): 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 20/20 [00:00<00:00, 100.96it/s, accuracy/val=0.0995] \n+ Epoch 1: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 64/64 [00:01<00:00, 53.17it/s, loss/train=2.3023]\n+ Epoch 1 (val): 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 20/20 [00:00<00:00, 100.96it/s, accuracy/val=0.0995]\n \n .. note::\n \n- It is currently not possible to show additional metrics. \n+ It is currently not possible to show additional metrics.\n Custom metrics for the TQDM progress bar will be supported in a future version.\n \n Args:\n", "issue": "TQDM spams lines during validation when running trainer.fit() in notebook\n** Environment **\r\nColab notebook.\r\n\r\n<img width=\"607\" alt=\"TQDM Spam\" src=\"https://user-images.githubusercontent.com/11728034/151625792-31dc2c0e-d572-43d1-9514-5127a6a3751b.png\">\r\n\r\n** To reproduce\r\n\r\nrun `trainer.fit()` in a Colab notebook. See [this notebook](https://colab.research.google.com/drive/12Dl0NVDaj4tf4gfpfg-rkIAoO_H7edo3?usp=sharing) for an example.\r\n\r\n## Expected behavior\r\n\r\nTQDM output should remain on a single line during evaluation, which happens during training.\r\n\n", "before_files": [{"content": "# Copyright 2021 MosaicML. 
All Rights Reserved.\n\nfrom __future__ import annotations\n\nimport sys\nfrom dataclasses import asdict, dataclass\nfrom typing import TYPE_CHECKING, Any, Dict, List, Optional\n\nimport tqdm\nimport yaml\n\nfrom composer.core.logging import LogLevel, TLogData, TLogDataValue, format_log_data_value\nfrom composer.core.logging.base_backend import BaseLoggerBackend\nfrom composer.core.state import State\nfrom composer.core.types import StateDict\nfrom composer.utils import dist\n\nif TYPE_CHECKING:\n from composer.core.logging import Logger\n\n_IS_TRAIN_TO_KEYS_TO_LOG = {True: ['loss/train'], False: ['accuracy/val']}\n\n\n@dataclass\nclass _TQDMLoggerInstanceState:\n total: Optional[int]\n description: str\n position: int\n keys_to_log: List[str]\n n: int\n epoch_metrics: Dict[str, TLogDataValue]\n\n\nclass _TQDMLoggerInstance:\n\n def __init__(self, state: _TQDMLoggerInstanceState) -> None:\n self.state = state\n self.pbar = tqdm.tqdm(total=state.total,\n desc=state.description,\n position=state.position,\n bar_format=\"{l_bar}{bar:10}{r_bar}{bar:-10b}\")\n self.pbar.set_postfix(state.epoch_metrics)\n\n def log_metric(self, data: TLogData):\n formatted_data = {k: format_log_data_value(v) for (k, v) in data.items() if k in self.state.keys_to_log}\n self.state.epoch_metrics.update(formatted_data)\n self.pbar.set_postfix(self.state.epoch_metrics)\n\n def update(self):\n self.pbar.update()\n self.state.n = self.pbar.n\n\n def close(self):\n self.pbar.close()\n\n def state_dict(self) -> StateDict:\n return asdict(self.state)\n\n\nclass TQDMLoggerBackend(BaseLoggerBackend):\n \"\"\"Shows TQDM progress bars.\n\n During training, the progress bar logs the batch and training loss.\n During validation, the progress bar logs the batch and validation accuracy.\n\n Example output::\n\n Epoch 1: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 64/64 [00:01<00:00, 53.17it/s, loss/train=2.3023] \n Epoch 1 (val): 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 20/20 [00:00<00:00, 100.96it/s, accuracy/val=0.0995] \n\n .. note::\n\n It is currently not possible to show additional metrics. \n Custom metrics for the TQDM progress bar will be supported in a future version.\n\n Args:\n config (dict or None, optional):\n Trainer configuration. If provided, it is printed to the terminal as YAML.\n \"\"\"\n\n def __init__(self, config: Optional[Dict[str, Any]] = None) -> None:\n super().__init__()\n self.pbars: Dict[bool, _TQDMLoggerInstance] = {}\n self.is_train: Optional[bool] = None\n self.config = config\n\n def will_log(self, state: State, log_level: LogLevel) -> bool:\n del state # Unused\n return dist.get_global_rank() == 0 and log_level <= LogLevel.BATCH\n\n def log_metric(self, epoch: int, step: int, log_level: LogLevel, data: TLogData) -> None:\n del epoch, step, log_level # Unused\n if self.is_train in self.pbars:\n # Logging outside an epoch\n assert self.is_train is not None\n self.pbars[self.is_train].log_metric(data)\n\n def init(self, state: State, logger: Logger) -> None:\n del state, logger # unused\n if self.config is not None:\n print(\"Config\")\n print(\"-\" * 30)\n yaml.safe_dump(self.config, stream=sys.stdout)\n print(\"-\" * 30)\n print()\n\n def _start(self, state: State):\n if dist.get_global_rank() != 0:\n return\n assert self.is_train is not None, \"self.is_train should be set by the callback\"\n # TODO(anis) -- in #120, len(state.eval_dataloader) is inaccurate, as it does not incorporate\n # trainer._eval_subset_num_batches. 
The evaluator spec should fix this.\n total_steps = state.steps_per_epoch if self.is_train else len(state.eval_dataloader)\n desc = f'Epoch {int(state.timer.epoch)}'\n position = 0 if self.is_train else 1\n if not self.is_train:\n desc += f\", Batch {int(state.timer.batch)} (val)\"\n self.pbars[self.is_train] = _TQDMLoggerInstance(\n _TQDMLoggerInstanceState(total=total_steps,\n position=position,\n n=0,\n keys_to_log=_IS_TRAIN_TO_KEYS_TO_LOG[self.is_train],\n description=desc,\n epoch_metrics={}))\n\n def epoch_start(self, state: State, logger: Logger) -> None:\n del logger # unused\n if dist.get_global_rank() != 0:\n return\n self.is_train = True\n self._start(state)\n\n def eval_start(self, state: State, logger: Logger) -> None:\n del logger # unused\n if dist.get_global_rank() != 0:\n return\n self.is_train = False\n self._start(state)\n\n def _update(self):\n if dist.get_global_rank() != 0:\n return\n if self.is_train in self.pbars:\n assert self.is_train is not None\n self.pbars[self.is_train].update()\n\n def batch_end(self, state: State, logger: Logger) -> None:\n del state, logger # unused\n if dist.get_global_rank() != 0:\n return\n self._update()\n\n def eval_after_forward(self, state: State, logger: Logger) -> None:\n del state, logger # unused\n if dist.get_global_rank() != 0:\n return\n self._update()\n\n def _end(self):\n if dist.get_global_rank() != 0:\n return\n if self.is_train in self.pbars:\n assert self.is_train is not None\n self.pbars[self.is_train].close()\n del self.pbars[self.is_train]\n self.is_train = None\n\n def epoch_end(self, state: State, logger: Logger) -> None:\n del state, logger # unused\n if dist.get_global_rank() != 0:\n return\n self._end()\n\n def eval_end(self, state: State, logger: Logger) -> None:\n del state, logger # unused\n if dist.get_global_rank() != 0:\n return\n self._end()\n\n def state_dict(self) -> StateDict:\n return {\n \"pbars\": {k: v.state_dict() for (k, v) in self.pbars.items()},\n \"is_train\": self.is_train,\n }\n\n def load_state_dict(self, state: StateDict) -> None:\n self.pbars = {k: _TQDMLoggerInstance(**v) for (k, v) in state[\"pbars\"].items()}\n self.is_train = state[\"is_train\"]\n", "path": "composer/loggers/tqdm_logger.py"}]} | 2,831 | 528 |
gh_patches_debug_35925 | rasdani/github-patches | git_diff | easybuilders__easybuild-framework-3424 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RPath compiler wrapper breaking precompiled header builds
This issue was found when building OpenCV-4.0.1-foss-2018b-Python-3.6.6.eb with EB 4.1.1. The build has multiple errors `undefined reference to main`. This is caused by the rpath wrappers adding `-Wl,-rpath=...` options to the g++ call, which turns otherwise compile-only commands to generate precompiled headers (`-x c++-header`) into commands that link.
See [easybuild-OpenCV-4.0.1-20200623.143513.mcEaZ.log.gz](https://github.com/easybuilders/easybuild-framework/files/4819395/easybuild-OpenCV-4.0.1-20200623.143513.mcEaZ.log.gz) for the full logs. But it can also be shown using only the generated rpath wrapper to compile a simple file:
```
paulm@tcn180 14:43 ~$ m list
Currently Loaded Modulefiles:
1) surfsara 2) 2019 3) EasyBuild/4.1.1 4) compilerwrappers 5) eb/4.1.1(default) 6) GCCcore/8.2.0 7) zlib/1.2.11-GCCcore-8.2.0 8) binutils/2.31.1-GCCcore-8.2.0 9) GCC/8.2.0-2.31.1
paulm@tcn180:~$ cat t.cc
#include <cstdio>
# Straight g++ call, no wrappers, succeeds
paulm@tcn180 14:45 ~$ /sw/arch/RedHatEnterpriseServer7/EB_production/2019/software/GCCcore/8.2.0/bin/g++ -x c++-header t.cc
paulm@tcn180 14:45 ~$
# Wrapped call, fails
paulm@tcn180 14:45 ~$ /scratch-shared/paulm/eb-3OiLzK/tmpvR7vdm/rpath_wrappers/gxx_wrapper/g++ -x c++-header t.cc
/lib/../lib64/crt1.o(.text+0x20): error: undefined reference to 'main'
collect2: error: ld returned 1 exit status
# Straight g++ call with -Wl,-rpath addition, no wrappers, fails
paulm@tcn180 14:46 ~$ /sw/arch/RedHatEnterpriseServer7/EB_production/2019/software/GCCcore/8.2.0/bin/g++ -x c++-header -Wl,-rpath=/tmp/doh t.cc
/lib/../lib64/crt1.o(.text+0x20): error: undefined reference to 'main'
collect2: error: ld returned 1 exit status
```
We (me and @casparvl) suggest that the rpath wrappers check for the `-x c++-header` (and similar) options to make sure an otherwise correct call of g++ is not turned into one that fails. Note that there is a bit of a mist as to the expected behaviour of GCC with various options, as the docs aren't entirely clear:
* For example, calling `g++ t.cc` fails if `t.cc` does not contain a main function as it does linking of the generated object code. But calling `g++ -x c++-header t.cc` apparently turns on compile-only mode and succeeds (but the [gcc docs](https://gcc.gnu.org/onlinedocs/gcc/Precompiled-Headers.html) only say: "To create a precompiled header file, simply compile it as you would any other file, if necessary using the -x option to make the driver treat it as a C or C++ header file")
* Adding `-Wl,-rpath=...` makes GCC link (although the docs do not specify this), unless `-c` is used, in which case linking is never done.
* The [examples in the GCC docs of using precompiled headers](https://gcc.gnu.org/onlinedocs/libstdc++/manual/using_headers.html) do not use `-c` when using `-x c++-header`. But upstream package could be more explicit by using `-c -x c++-header`, which would still work when the RPATH wrappers would add `-Wl,-rpath=...`, but it can't really be expected of those packages to handle that case of downstream building.
</issue>
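A sketch of the suggested guard, not the exact change, just the idea of scanning the compiler arguments for a precompiled-header request before deciding whether any `-Wl,-rpath` flags may be injected:

```python
# Illustrative helper only: detect "-x c-header" / "-x c++-header", in which
# case injecting -Wl,-rpath=... must be skipped, because those options would
# otherwise turn the compile-only invocation back into one that links.
def requests_pch_compile(args):
    for idx, arg in enumerate(args):
        if arg == '-x' and idx + 1 < len(args) and args[idx + 1] in ('c-header', 'c++-header'):
            return True
    return False


print(requests_pch_compile(['-x', 'c++-header', 't.cc']))  # True
print(requests_pch_compile(['-L/opt/lib', 't.cc']))        # False
```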
<code>
[start of easybuild/scripts/rpath_args.py]
1 #!/usr/bin/env python
2 ##
3 # Copyright 2016-2020 Ghent University
4 #
5 # This file is part of EasyBuild,
6 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
7 # with support of Ghent University (http://ugent.be/hpc),
8 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
9 # Flemish Research Foundation (FWO) (http://www.fwo.be/en)
10 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
11 #
12 # https://github.com/easybuilders/easybuild
13 #
14 # EasyBuild is free software: you can redistribute it and/or modify
15 # it under the terms of the GNU General Public License as published by
16 # the Free Software Foundation v2.
17 #
18 # EasyBuild is distributed in the hope that it will be useful,
19 # but WITHOUT ANY WARRANTY; without even the implied warranty of
20 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 # GNU General Public License for more details.
22 #
23 # You should have received a copy of the GNU General Public License
24 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
25 ##
26 """
27 Utility script used by RPATH wrapper script;
28 output is statements that define the following environment variables
29 * $CMD_ARGS: new list of command line arguments to pass
30 * $RPATH_ARGS: command line option to specify list of paths to RPATH
31
32 author: Kenneth Hoste (HPC-UGent)
33 """
34 import os
35 import re
36 import sys
37
38
39 cmd = sys.argv[1]
40 rpath_filter = sys.argv[2]
41 rpath_include = sys.argv[3]
42 args = sys.argv[4:]
43
44 # wheter or not to use -Wl to pass options to the linker
45 if cmd in ['ld', 'ld.gold', 'ld.bfd']:
46 flag_prefix = ''
47 else:
48 flag_prefix = '-Wl,'
49
50 rpath_filter = rpath_filter.split(',')
51 if rpath_filter:
52 rpath_filter = re.compile('^%s$' % '|'.join(rpath_filter))
53 else:
54 rpath_filter = None
55
56 if rpath_include:
57 rpath_include = rpath_include.split(',')
58 else:
59 rpath_include = []
60
61 version_mode = False
62 cmd_args, cmd_args_rpath = [], []
63
64 # process list of original command line arguments
65 idx = 0
66 while idx < len(args):
67
68 arg = args[idx]
69
70 # if command is run in 'version check' mode, make sure we don't include *any* -rpath arguments
71 if arg in ['-v', '-V', '--version', '-dumpversion']:
72 version_mode = True
73 cmd_args.append(arg)
74
75 # FIXME: also consider $LIBRARY_PATH?
76 # FIXME: support to hard inject additional library paths?
77 # FIXME: support to specify list of path prefixes that should not be RPATH'ed into account?
78 # FIXME skip paths in /tmp, build dir, etc.?
79
80 # handle -L flags, inject corresponding -rpath flag
81 elif arg.startswith('-L'):
82 # take into account that argument to -L may be separated with one or more spaces...
83 if arg == '-L':
84 # actual library path is next argument when arg='-L'
85 idx += 1
86 lib_path = args[idx]
87 else:
88 lib_path = arg[2:]
89
90 if os.path.isabs(lib_path) and (rpath_filter is None or not rpath_filter.match(lib_path)):
91 # inject -rpath flag in front for every -L with an absolute path,
92 # also retain the -L flag (without reordering!)
93 cmd_args_rpath.append(flag_prefix + '-rpath=%s' % lib_path)
94 cmd_args.append('-L%s' % lib_path)
95 else:
96 # don't RPATH in relative paths;
97 # it doesn't make much sense, and it can also break the build because it may result in reordering lib paths
98 cmd_args.append('-L%s' % lib_path)
99
100 # replace --enable-new-dtags with --disable-new-dtags if it's used;
101 # --enable-new-dtags would result in copying rpath to runpath,
102 # meaning that $LD_LIBRARY_PATH is taken into account again;
103 # --enable-new-dtags is not removed but replaced to prevent issues when linker flag is forwarded from the compiler
104 # to the linker with an extra prefixed flag (either -Xlinker or -Wl,).
105 # In that case, the compiler would erroneously pass the next random argument to the linker.
106 elif arg == flag_prefix + '--enable-new-dtags':
107 cmd_args.append(flag_prefix + '--disable-new-dtags')
108 else:
109 cmd_args.append(arg)
110
111 idx += 1
112
113 # add -rpath flags in front
114 cmd_args = cmd_args_rpath + cmd_args
115
116 cmd_args_rpath = [flag_prefix + '-rpath=%s' % inc for inc in rpath_include]
117
118 if not version_mode:
119 cmd_args = cmd_args_rpath + [
120 # try to make sure that RUNPATH is not used by always injecting --disable-new-dtags
121 flag_prefix + '--disable-new-dtags',
122 ] + cmd_args
123
124 # wrap all arguments into single quotes to avoid further bash expansion
125 cmd_args = ["'%s'" % a.replace("'", "''") for a in cmd_args]
126
127 # output: statement to define $CMD_ARGS and $RPATH_ARGS
128 print("CMD_ARGS=(%s)" % ' '.join(cmd_args))
129
[end of easybuild/scripts/rpath_args.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/easybuild/scripts/rpath_args.py b/easybuild/scripts/rpath_args.py
--- a/easybuild/scripts/rpath_args.py
+++ b/easybuild/scripts/rpath_args.py
@@ -41,7 +41,7 @@
rpath_include = sys.argv[3]
args = sys.argv[4:]
-# wheter or not to use -Wl to pass options to the linker
+# determine whether or not to use -Wl to pass options to the linker based on name of command
if cmd in ['ld', 'ld.gold', 'ld.bfd']:
flag_prefix = ''
else:
@@ -58,7 +58,7 @@
else:
rpath_include = []
-version_mode = False
+add_rpath_args = True
cmd_args, cmd_args_rpath = [], []
# process list of original command line arguments
@@ -69,7 +69,16 @@
# if command is run in 'version check' mode, make sure we don't include *any* -rpath arguments
if arg in ['-v', '-V', '--version', '-dumpversion']:
- version_mode = True
+ add_rpath_args = False
+ cmd_args.append(arg)
+
+ # compiler options like "-x c++header" imply no linking is done (similar to -c),
+ # so then we must not inject -Wl,-rpath option since they *enable* linking;
+ # see https://github.com/easybuilders/easybuild-framework/issues/3371
+ elif arg == '-x':
+ idx_next = idx + 1
+ if idx_next < len(args) and args[idx_next] in ['c-header', 'c++-header']:
+ add_rpath_args = False
cmd_args.append(arg)
# FIXME: also consider $LIBRARY_PATH?
@@ -110,16 +119,15 @@
idx += 1
-# add -rpath flags in front
-cmd_args = cmd_args_rpath + cmd_args
+if add_rpath_args:
+ # try to make sure that RUNPATH is not used by always injecting --disable-new-dtags
+ cmd_args_rpath.insert(0, flag_prefix + '--disable-new-dtags')
-cmd_args_rpath = [flag_prefix + '-rpath=%s' % inc for inc in rpath_include]
+ # add -rpath options for paths listed in rpath_include
+ cmd_args_rpath = [flag_prefix + '-rpath=%s' % inc for inc in rpath_include] + cmd_args_rpath
-if not version_mode:
- cmd_args = cmd_args_rpath + [
- # try to make sure that RUNPATH is not used by always injecting --disable-new-dtags
- flag_prefix + '--disable-new-dtags',
- ] + cmd_args
+ # add -rpath flags in front
+ cmd_args = cmd_args_rpath + cmd_args
# wrap all arguments into single quotes to avoid further bash expansion
cmd_args = ["'%s'" % a.replace("'", "''") for a in cmd_args]
| {"golden_diff": "diff --git a/easybuild/scripts/rpath_args.py b/easybuild/scripts/rpath_args.py\n--- a/easybuild/scripts/rpath_args.py\n+++ b/easybuild/scripts/rpath_args.py\n@@ -41,7 +41,7 @@\n rpath_include = sys.argv[3]\n args = sys.argv[4:]\n \n-# wheter or not to use -Wl to pass options to the linker\n+# determine whether or not to use -Wl to pass options to the linker based on name of command\n if cmd in ['ld', 'ld.gold', 'ld.bfd']:\n flag_prefix = ''\n else:\n@@ -58,7 +58,7 @@\n else:\n rpath_include = []\n \n-version_mode = False\n+add_rpath_args = True\n cmd_args, cmd_args_rpath = [], []\n \n # process list of original command line arguments\n@@ -69,7 +69,16 @@\n \n # if command is run in 'version check' mode, make sure we don't include *any* -rpath arguments\n if arg in ['-v', '-V', '--version', '-dumpversion']:\n- version_mode = True\n+ add_rpath_args = False\n+ cmd_args.append(arg)\n+\n+ # compiler options like \"-x c++header\" imply no linking is done (similar to -c),\n+ # so then we must not inject -Wl,-rpath option since they *enable* linking;\n+ # see https://github.com/easybuilders/easybuild-framework/issues/3371\n+ elif arg == '-x':\n+ idx_next = idx + 1\n+ if idx_next < len(args) and args[idx_next] in ['c-header', 'c++-header']:\n+ add_rpath_args = False\n cmd_args.append(arg)\n \n # FIXME: also consider $LIBRARY_PATH?\n@@ -110,16 +119,15 @@\n \n idx += 1\n \n-# add -rpath flags in front\n-cmd_args = cmd_args_rpath + cmd_args\n+if add_rpath_args:\n+ # try to make sure that RUNPATH is not used by always injecting --disable-new-dtags\n+ cmd_args_rpath.insert(0, flag_prefix + '--disable-new-dtags')\n \n-cmd_args_rpath = [flag_prefix + '-rpath=%s' % inc for inc in rpath_include]\n+ # add -rpath options for paths listed in rpath_include\n+ cmd_args_rpath = [flag_prefix + '-rpath=%s' % inc for inc in rpath_include] + cmd_args_rpath\n \n-if not version_mode:\n- cmd_args = cmd_args_rpath + [\n- # try to make sure that RUNPATH is not used by always injecting --disable-new-dtags\n- flag_prefix + '--disable-new-dtags',\n- ] + cmd_args\n+ # add -rpath flags in front\n+ cmd_args = cmd_args_rpath + cmd_args\n \n # wrap all arguments into single quotes to avoid further bash expansion\n cmd_args = [\"'%s'\" % a.replace(\"'\", \"''\") for a in cmd_args]\n", "issue": "RPath compiler wrapper breaking precompiled header builds\nThis issue was found when building OpenCV-4.0.1-foss-2018b-Python-3.6.6.eb with EB 4.1.1. The build has multiple errors `undefined reference to main`. This is caused by the rpath wrappers adding `-Wl,-rpath=...` options to the g++ call, which turns otherwise compile-only commands to generate precompiled headers (`-x c++-header`) into commands that link. \r\n\r\nSee [easybuild-OpenCV-4.0.1-20200623.143513.mcEaZ.log.gz](https://github.com/easybuilders/easybuild-framework/files/4819395/easybuild-OpenCV-4.0.1-20200623.143513.mcEaZ.log.gz) for the full logs. 
But it can also be shown using only the generated rpath wrapper to compile a simple file:\r\n\r\n```\r\npaulm@tcn180 14:43 ~$ m list\r\nCurrently Loaded Modulefiles:\r\n 1) surfsara 2) 2019 3) EasyBuild/4.1.1 4) compilerwrappers 5) eb/4.1.1(default) 6) GCCcore/8.2.0 7) zlib/1.2.11-GCCcore-8.2.0 8) binutils/2.31.1-GCCcore-8.2.0 9) GCC/8.2.0-2.31.1 \r\n\r\npaulm@tcn180:~$ cat t.cc\r\n#include <cstdio>\r\n\r\n# Straight g++ call, no wrappers, succeeds\r\npaulm@tcn180 14:45 ~$ /sw/arch/RedHatEnterpriseServer7/EB_production/2019/software/GCCcore/8.2.0/bin/g++ -x c++-header t.cc\r\npaulm@tcn180 14:45 ~$ \r\n\r\n# Wrapped call, fails\r\npaulm@tcn180 14:45 ~$ /scratch-shared/paulm/eb-3OiLzK/tmpvR7vdm/rpath_wrappers/gxx_wrapper/g++ -x c++-header t.cc \r\n/lib/../lib64/crt1.o(.text+0x20): error: undefined reference to 'main'\r\ncollect2: error: ld returned 1 exit status\r\n\r\n# Straight g++ call with -Wl,-rpath addition, no wrappers, fails\r\npaulm@tcn180 14:46 ~$ /sw/arch/RedHatEnterpriseServer7/EB_production/2019/software/GCCcore/8.2.0/bin/g++ -x c++-header -Wl,-rpath=/tmp/doh t.cc\r\n/lib/../lib64/crt1.o(.text+0x20): error: undefined reference to 'main'\r\ncollect2: error: ld returned 1 exit status\r\n```\r\n\r\nWe (me and @casparvl) suggest that the rpath wrappers check for the `-x c++-header` (and similar) options to make sure an otherwise correct call of g++ is not turned into one that fails. Note that there is a bit of a mist as to the expected behaviour of GCC with various options, as the docs aren't entirely clear:\r\n\r\n* For example, calling `g++ t.cc` fails if `t.cc` does not contain a main function as it does linking of the generated object code. But calling `g++ -x c++-header t.cc` apparently turns on compile-only mode and succeeds (but the [gcc docs](https://gcc.gnu.org/onlinedocs/gcc/Precompiled-Headers.html) only say: \"To create a precompiled header file, simply compile it as you would any other file, if necessary using the -x option to make the driver treat it as a C or C++ header file\")\r\n* Adding `-Wl,-rpath=...` makes GCC link (although the docs do not specify this), unless `-c` is used, in which case linking is never done.\r\n* The [examples in the GCC docs of using precompiled headers](https://gcc.gnu.org/onlinedocs/libstdc++/manual/using_headers.html) do not use `-c` when using `-x c++-header`. But upstream package could be more explicit by using `-c -x c++-header`, which would still work when the RPATH wrappers would add `-Wl,-rpath=...`, but it can't really be expected of those packages to handle that case of downstream building.\n", "before_files": [{"content": "#!/usr/bin/env python\n##\n# Copyright 2016-2020 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nUtility script used by RPATH wrapper script;\noutput is statements that define the following environment variables\n* $CMD_ARGS: new list of command line arguments to pass\n* $RPATH_ARGS: command line option to specify list of paths to RPATH\n\nauthor: Kenneth Hoste (HPC-UGent)\n\"\"\"\nimport os\nimport re\nimport sys\n\n\ncmd = sys.argv[1]\nrpath_filter = sys.argv[2]\nrpath_include = sys.argv[3]\nargs = sys.argv[4:]\n\n# wheter or not to use -Wl to pass options to the linker\nif cmd in ['ld', 'ld.gold', 'ld.bfd']:\n flag_prefix = ''\nelse:\n flag_prefix = '-Wl,'\n\nrpath_filter = rpath_filter.split(',')\nif rpath_filter:\n rpath_filter = re.compile('^%s$' % '|'.join(rpath_filter))\nelse:\n rpath_filter = None\n\nif rpath_include:\n rpath_include = rpath_include.split(',')\nelse:\n rpath_include = []\n\nversion_mode = False\ncmd_args, cmd_args_rpath = [], []\n\n# process list of original command line arguments\nidx = 0\nwhile idx < len(args):\n\n arg = args[idx]\n\n # if command is run in 'version check' mode, make sure we don't include *any* -rpath arguments\n if arg in ['-v', '-V', '--version', '-dumpversion']:\n version_mode = True\n cmd_args.append(arg)\n\n # FIXME: also consider $LIBRARY_PATH?\n # FIXME: support to hard inject additional library paths?\n # FIXME: support to specify list of path prefixes that should not be RPATH'ed into account?\n # FIXME skip paths in /tmp, build dir, etc.?\n\n # handle -L flags, inject corresponding -rpath flag\n elif arg.startswith('-L'):\n # take into account that argument to -L may be separated with one or more spaces...\n if arg == '-L':\n # actual library path is next argument when arg='-L'\n idx += 1\n lib_path = args[idx]\n else:\n lib_path = arg[2:]\n\n if os.path.isabs(lib_path) and (rpath_filter is None or not rpath_filter.match(lib_path)):\n # inject -rpath flag in front for every -L with an absolute path,\n # also retain the -L flag (without reordering!)\n cmd_args_rpath.append(flag_prefix + '-rpath=%s' % lib_path)\n cmd_args.append('-L%s' % lib_path)\n else:\n # don't RPATH in relative paths;\n # it doesn't make much sense, and it can also break the build because it may result in reordering lib paths\n cmd_args.append('-L%s' % lib_path)\n\n # replace --enable-new-dtags with --disable-new-dtags if it's used;\n # --enable-new-dtags would result in copying rpath to runpath,\n # meaning that $LD_LIBRARY_PATH is taken into account again;\n # --enable-new-dtags is not removed but replaced to prevent issues when linker flag is forwarded from the compiler\n # to the linker with an extra prefixed flag (either -Xlinker or -Wl,).\n # In that case, the compiler would erroneously pass the next random argument to the linker.\n elif arg == flag_prefix + '--enable-new-dtags':\n cmd_args.append(flag_prefix + '--disable-new-dtags')\n else:\n cmd_args.append(arg)\n\n idx += 1\n\n# add -rpath flags in front\ncmd_args = cmd_args_rpath + cmd_args\n\ncmd_args_rpath = [flag_prefix + '-rpath=%s' % inc for inc in rpath_include]\n\nif not version_mode:\n cmd_args = cmd_args_rpath + [\n # try to make sure that RUNPATH is not used by always injecting --disable-new-dtags\n flag_prefix + '--disable-new-dtags',\n ] + cmd_args\n\n# wrap all arguments into single quotes to avoid further bash expansion\ncmd_args = [\"'%s'\" % a.replace(\"'\", \"''\") for a in 
cmd_args]\n\n# output: statement to define $CMD_ARGS and $RPATH_ARGS\nprint(\"CMD_ARGS=(%s)\" % ' '.join(cmd_args))\n", "path": "easybuild/scripts/rpath_args.py"}]} | 3,069 | 689 |
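
A minimal sketch of the guard the EasyBuild rpath issue above asks for: detect compile-only compiler invocations (for example `-c`, `-E`, `-S`, or precompiled-header mode via `-x c++-header`) before the wrapper injects any `-Wl,-rpath` or `--disable-new-dtags` flags. The helper name and the exact flag set are assumptions for illustration; this is not part of `rpath_args.py`.

```python
# Sketch only: a compile-only detector that a wrapper like rpath_args.py could
# call before appending linker flags. Flag coverage is intentionally minimal.
COMPILE_ONLY_FLAGS = {'-c', '-E', '-S'}
HEADER_LANGS = {'c-header', 'c++-header'}

def is_compile_only(args):
    """Return True when the compiler command will not reach the link step."""
    for i, arg in enumerate(args):
        if arg in COMPILE_ONLY_FLAGS:
            return True
        # '-x c++-header' can appear as two tokens or fused ('-xc++-header')
        if arg == '-x' and i + 1 < len(args) and args[i + 1] in HEADER_LANGS:
            return True
        if arg.startswith('-x') and arg[2:] in HEADER_LANGS:
            return True
    return False

# The failing call from the issue would then be left untouched:
assert is_compile_only(['-x', 'c++-header', 't.cc'])
assert not is_compile_only(['-L/opt/lib', 't.cc'])
```
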
gh_patches_debug_3221 | rasdani/github-patches | git_diff | spack__spack-1961 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`spack uninstall -f` is broken
@alalazo It looks like PR #670 might have broken "spack uninstall -f". Can you please look into it?
```
[me@ankeli icebin]$ spack uninstall -f openblas
==> The following packages will be uninstalled :
==> Do you want to proceed ? [y/n]
y
[me@ankeli icebin]$ ls /home/rpfische/spack/opt/spack/linux-x86_64/gcc-4.9.3/openblas-0.2.17-lvi5fks7zompzclvc2lyzwilfefal4ld/lib
blas.a cmake libblas.a liblapack.a libopenblas.a libopenblas_nehalemp-r0.2.17.a
```
</issue>
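
A hedged reading of the symptom, matched against the `concretize_specs` helper in the listing below: when `--force` is passed, the `and not force` clause suppresses the "does not match any installed packages" error, so an empty match list slips through, the command prints an empty list, and nothing is removed. A minimal sketch of that short-circuit (inputs are made up for illustration):

```python
# Sketch of the suspect branch in concretize_specs(); not the real spack code.
def concretize_specs_sketch(matching, force):
    has_errors = False
    if len(matching) == 0 and not force:   # with force=True this error is skipped
        has_errors = True
    if has_errors:
        raise SystemExit("error: no matching installed packages")
    return matching                        # [] propagates to the uninstall list

print(concretize_specs_sketch([], force=True))   # [] : nothing gets uninstalled
```
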
<code>
[start of lib/spack/spack/cmd/uninstall.py]
1 ##############################################################################
2 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
3 # Produced at the Lawrence Livermore National Laboratory.
4 #
5 # This file is part of Spack.
6 # Created by Todd Gamblin, [email protected], All rights reserved.
7 # LLNL-CODE-647188
8 #
9 # For details, see https://github.com/llnl/spack
10 # Please also see the LICENSE file for our notice and the LGPL.
11 #
12 # This program is free software; you can redistribute it and/or modify
13 # it under the terms of the GNU Lesser General Public License (as
14 # published by the Free Software Foundation) version 2.1, February 1999.
15 #
16 # This program is distributed in the hope that it will be useful, but
17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
19 # conditions of the GNU Lesser General Public License for more details.
20 #
21 # You should have received a copy of the GNU Lesser General Public
22 # License along with this program; if not, write to the Free Software
23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 ##############################################################################
25 from __future__ import print_function
26
27 import argparse
28
29 import llnl.util.tty as tty
30 import spack
31 import spack.cmd
32 import spack.repository
33
34 description = "Remove an installed package"
35
36 error_message = """You can either:
37 a) Use a more specific spec, or
38 b) use spack uninstall -a to uninstall ALL matching specs.
39 """
40
41 # Arguments for display_specs when we find ambiguity
42 display_args = {
43 'long': True,
44 'show_flags': True,
45 'variants': True
46 }
47
48
49 def setup_parser(subparser):
50 subparser.add_argument(
51 '-f', '--force', action='store_true', dest='force',
52 help="Remove regardless of whether other packages depend on this one.")
53
54 subparser.add_argument(
55 '-a', '--all', action='store_true', dest='all',
56 help="USE CAREFULLY. Remove ALL installed packages that match each "
57 "supplied spec. i.e., if you say uninstall libelf, ALL versions "
58 "of libelf are uninstalled. This is both useful and dangerous, "
59 "like rm -r.")
60
61 subparser.add_argument(
62 '-d', '--dependents', action='store_true', dest='dependents',
63 help='Also uninstall any packages that depend on the ones given '
64 'via command line.')
65
66 subparser.add_argument(
67 '-y', '--yes-to-all', action='store_true', dest='yes_to_all',
68 help='Assume "yes" is the answer to every confirmation requested')
69
70 subparser.add_argument(
71 'packages',
72 nargs=argparse.REMAINDER,
73 help="specs of packages to uninstall")
74
75
76 def concretize_specs(specs, allow_multiple_matches=False, force=False):
77 """Returns a list of specs matching the non necessarily
78 concretized specs given from cli
79
80 Args:
81 specs: list of specs to be matched against installed packages
82 allow_multiple_matches : if True multiple matches are admitted
83
84 Return:
85 list of specs
86 """
87 # List of specs that match expressions given via command line
88 specs_from_cli = []
89 has_errors = False
90 for spec in specs:
91 matching = spack.installed_db.query(spec)
92 # For each spec provided, make sure it refers to only one package.
93 # Fail and ask user to be unambiguous if it doesn't
94 if not allow_multiple_matches and len(matching) > 1:
95 tty.error("%s matches multiple packages:" % spec)
96 print()
97 spack.cmd.display_specs(matching, **display_args)
98 print()
99 has_errors = True
100
101 # No installed package matches the query
102 if len(matching) == 0 and not force:
103 tty.error("%s does not match any installed packages." % spec)
104 has_errors = True
105
106 specs_from_cli.extend(matching)
107 if has_errors:
108 tty.die(error_message)
109
110 return specs_from_cli
111
112
113 def installed_dependents(specs):
114 """Returns a dictionary that maps a spec with a list of its
115 installed dependents
116
117 Args:
118 specs: list of specs to be checked for dependents
119
120 Returns:
121 dictionary of installed dependents
122 """
123 dependents = {}
124 for item in specs:
125 lst = [x for x in item.package.installed_dependents if x not in specs]
126 if lst:
127 lst = list(set(lst))
128 dependents[item] = lst
129 return dependents
130
131
132 def do_uninstall(specs, force):
133 """
134 Uninstalls all the specs in a list.
135
136 Args:
137 specs: list of specs to be uninstalled
138 force: force uninstallation (boolean)
139 """
140 packages = []
141 for item in specs:
142 try:
143 # should work if package is known to spack
144 packages.append(item.package)
145 except spack.repository.UnknownPackageError:
146 # The package.py file has gone away -- but still
147 # want to uninstall.
148 spack.Package(item).do_uninstall(force=True)
149
150 # Sort packages to be uninstalled by the number of installed dependents
151 # This ensures we do things in the right order
152 def num_installed_deps(pkg):
153 return len(pkg.installed_dependents)
154
155 packages.sort(key=num_installed_deps)
156 for item in packages:
157 item.do_uninstall(force=force)
158
159
160 def uninstall(parser, args):
161 if not args.packages:
162 tty.die("uninstall requires at least one package argument.")
163
164 with spack.installed_db.write_transaction():
165 specs = spack.cmd.parse_specs(args.packages)
166 # Gets the list of installed specs that match the ones give via cli
167 # takes care of '-a' is given in the cli
168 uninstall_list = concretize_specs(specs, args.all, args.force)
169 dependent_list = installed_dependents(
170 uninstall_list) # takes care of '-d'
171
172 # Process dependent_list and update uninstall_list
173 has_error = False
174 if dependent_list and not args.dependents and not args.force:
175 for spec, lst in dependent_list.items():
176 tty.error("Will not uninstall %s" %
177 spec.format("$_$@$%@$#", color=True))
178 print('')
179 print("The following packages depend on it:")
180 spack.cmd.display_specs(lst, **display_args)
181 print('')
182 has_error = True
183 elif args.dependents:
184 for key, lst in dependent_list.items():
185 uninstall_list.extend(lst)
186 uninstall_list = list(set(uninstall_list))
187
188 if has_error:
189 tty.die('You can use spack uninstall --dependents '
190 'to uninstall these dependencies as well')
191
192 if not args.yes_to_all:
193 tty.msg("The following packages will be uninstalled : ")
194 print('')
195 spack.cmd.display_specs(uninstall_list, **display_args)
196 print('')
197 spack.cmd.ask_for_confirmation('Do you want to proceed ? ')
198
199 # Uninstall everything on the list
200 do_uninstall(uninstall_list, args.force)
201
[end of lib/spack/spack/cmd/uninstall.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/spack/spack/cmd/uninstall.py b/lib/spack/spack/cmd/uninstall.py
--- a/lib/spack/spack/cmd/uninstall.py
+++ b/lib/spack/spack/cmd/uninstall.py
@@ -99,7 +99,7 @@
has_errors = True
# No installed package matches the query
- if len(matching) == 0 and not force:
+ if len(matching) == 0:
tty.error("%s does not match any installed packages." % spec)
has_errors = True
| {"golden_diff": "diff --git a/lib/spack/spack/cmd/uninstall.py b/lib/spack/spack/cmd/uninstall.py\n--- a/lib/spack/spack/cmd/uninstall.py\n+++ b/lib/spack/spack/cmd/uninstall.py\n@@ -99,7 +99,7 @@\n has_errors = True\n \n # No installed package matches the query\n- if len(matching) == 0 and not force:\n+ if len(matching) == 0:\n tty.error(\"%s does not match any installed packages.\" % spec)\n has_errors = True\n", "issue": "`spack uninstall -f` is broken\n@alalazo It looks like PR #670 might have broken \"spack uninstall -f\". Can you please look into it?\n\n```\n[me@ankeli icebin]$ spack uninstall -f openblas\n==> The following packages will be uninstalled : \n\n\n==> Do you want to proceed ? [y/n]\ny\n[me@ankeli icebin]$ ls /home/rpfische/spack/opt/spack/linux-x86_64/gcc-4.9.3/openblas-0.2.17-lvi5fks7zompzclvc2lyzwilfefal4ld/lib\nblas.a cmake libblas.a liblapack.a libopenblas.a libopenblas_nehalemp-r0.2.17.a\n```\n\n`spack uninstall -f` is broken\n@alalazo It looks like PR #670 might have broken \"spack uninstall -f\". Can you please look into it?\n\n```\n[me@ankeli icebin]$ spack uninstall -f openblas\n==> The following packages will be uninstalled : \n\n\n==> Do you want to proceed ? [y/n]\ny\n[me@ankeli icebin]$ ls /home/rpfische/spack/opt/spack/linux-x86_64/gcc-4.9.3/openblas-0.2.17-lvi5fks7zompzclvc2lyzwilfefal4ld/lib\nblas.a cmake libblas.a liblapack.a libopenblas.a libopenblas_nehalemp-r0.2.17.a\n```\n\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom __future__ import print_function\n\nimport argparse\n\nimport llnl.util.tty as tty\nimport spack\nimport spack.cmd\nimport spack.repository\n\ndescription = \"Remove an installed package\"\n\nerror_message = \"\"\"You can either:\n a) Use a more specific spec, or\n b) use spack uninstall -a to uninstall ALL matching specs.\n\"\"\"\n\n# Arguments for display_specs when we find ambiguity\ndisplay_args = {\n 'long': True,\n 'show_flags': True,\n 'variants': True\n}\n\n\ndef setup_parser(subparser):\n subparser.add_argument(\n '-f', '--force', action='store_true', dest='force',\n help=\"Remove regardless of whether other packages depend on this one.\")\n\n subparser.add_argument(\n '-a', '--all', action='store_true', dest='all',\n help=\"USE CAREFULLY. Remove ALL installed packages that match each \"\n \"supplied spec. 
i.e., if you say uninstall libelf, ALL versions \"\n \"of libelf are uninstalled. This is both useful and dangerous, \"\n \"like rm -r.\")\n\n subparser.add_argument(\n '-d', '--dependents', action='store_true', dest='dependents',\n help='Also uninstall any packages that depend on the ones given '\n 'via command line.')\n\n subparser.add_argument(\n '-y', '--yes-to-all', action='store_true', dest='yes_to_all',\n help='Assume \"yes\" is the answer to every confirmation requested')\n\n subparser.add_argument(\n 'packages',\n nargs=argparse.REMAINDER,\n help=\"specs of packages to uninstall\")\n\n\ndef concretize_specs(specs, allow_multiple_matches=False, force=False):\n \"\"\"Returns a list of specs matching the non necessarily\n concretized specs given from cli\n\n Args:\n specs: list of specs to be matched against installed packages\n allow_multiple_matches : if True multiple matches are admitted\n\n Return:\n list of specs\n \"\"\"\n # List of specs that match expressions given via command line\n specs_from_cli = []\n has_errors = False\n for spec in specs:\n matching = spack.installed_db.query(spec)\n # For each spec provided, make sure it refers to only one package.\n # Fail and ask user to be unambiguous if it doesn't\n if not allow_multiple_matches and len(matching) > 1:\n tty.error(\"%s matches multiple packages:\" % spec)\n print()\n spack.cmd.display_specs(matching, **display_args)\n print()\n has_errors = True\n\n # No installed package matches the query\n if len(matching) == 0 and not force:\n tty.error(\"%s does not match any installed packages.\" % spec)\n has_errors = True\n\n specs_from_cli.extend(matching)\n if has_errors:\n tty.die(error_message)\n\n return specs_from_cli\n\n\ndef installed_dependents(specs):\n \"\"\"Returns a dictionary that maps a spec with a list of its\n installed dependents\n\n Args:\n specs: list of specs to be checked for dependents\n\n Returns:\n dictionary of installed dependents\n \"\"\"\n dependents = {}\n for item in specs:\n lst = [x for x in item.package.installed_dependents if x not in specs]\n if lst:\n lst = list(set(lst))\n dependents[item] = lst\n return dependents\n\n\ndef do_uninstall(specs, force):\n \"\"\"\n Uninstalls all the specs in a list.\n\n Args:\n specs: list of specs to be uninstalled\n force: force uninstallation (boolean)\n \"\"\"\n packages = []\n for item in specs:\n try:\n # should work if package is known to spack\n packages.append(item.package)\n except spack.repository.UnknownPackageError:\n # The package.py file has gone away -- but still\n # want to uninstall.\n spack.Package(item).do_uninstall(force=True)\n\n # Sort packages to be uninstalled by the number of installed dependents\n # This ensures we do things in the right order\n def num_installed_deps(pkg):\n return len(pkg.installed_dependents)\n\n packages.sort(key=num_installed_deps)\n for item in packages:\n item.do_uninstall(force=force)\n\n\ndef uninstall(parser, args):\n if not args.packages:\n tty.die(\"uninstall requires at least one package argument.\")\n\n with spack.installed_db.write_transaction():\n specs = spack.cmd.parse_specs(args.packages)\n # Gets the list of installed specs that match the ones give via cli\n # takes care of '-a' is given in the cli\n uninstall_list = concretize_specs(specs, args.all, args.force)\n dependent_list = installed_dependents(\n uninstall_list) # takes care of '-d'\n\n # Process dependent_list and update uninstall_list\n has_error = False\n if dependent_list and not args.dependents and not args.force:\n for spec, lst 
in dependent_list.items():\n tty.error(\"Will not uninstall %s\" %\n spec.format(\"$_$@$%@$#\", color=True))\n print('')\n print(\"The following packages depend on it:\")\n spack.cmd.display_specs(lst, **display_args)\n print('')\n has_error = True\n elif args.dependents:\n for key, lst in dependent_list.items():\n uninstall_list.extend(lst)\n uninstall_list = list(set(uninstall_list))\n\n if has_error:\n tty.die('You can use spack uninstall --dependents '\n 'to uninstall these dependencies as well')\n\n if not args.yes_to_all:\n tty.msg(\"The following packages will be uninstalled : \")\n print('')\n spack.cmd.display_specs(uninstall_list, **display_args)\n print('')\n spack.cmd.ask_for_confirmation('Do you want to proceed ? ')\n\n # Uninstall everything on the list\n do_uninstall(uninstall_list, args.force)\n", "path": "lib/spack/spack/cmd/uninstall.py"}]} | 2,985 | 121 |
gh_patches_debug_62174 | rasdani/github-patches | git_diff | pyro-ppl__pyro-2617 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[bug] Samples from LKJCorrCholesky have the wrong shape
### Issue Description
The batch and sample shapes for `LKJCorrCholesky` are swapped when sampling.
### Environment
OS: Ubuntu 16.04
Python: 3.7.2
Torch version: 1.6.0
Pyro version: 1.4.0
### Code Snippet
Minimal example:
```python
>>> dist = pyro.distributions.LKJCorrCholesky(d=3, eta=torch.ones(())).expand(12)
>>> # batch shape and event shape are as you'd expect
>>> dist.batch_shape
torch.Size([12])
>>> dist.event_shape
torch.Size([3, 3])
>>> # samples have correct shape when sample_shape=()
>>> dist.shape(())
torch.Size([12, 3, 3])
>>> dist.sample().shape
torch.Size([12, 3, 3])
>>> # samples have the wrong shape when sample_shape is non-unit
>>> dist.shape((4,)) # as expected
torch.Size([4, 12, 3, 3])
>>> dist.sample((4,)).shape
torch.Size([12, 4, 3, 3])
```
I think this line is the culprit: https://github.com/pyro-ppl/pyro/blob/7c2c22c10dffda8a33ffbd593cc8d58819959e40/pyro/distributions/lkj.py#L67
```python
def sample(self, sample_shape=torch.Size()):
y = self._gen.sample(sample_shape=self.batch_shape + sample_shape).detach()
z = y.mul(2).add(-1.0)
return _vector_to_l_cholesky(z)
```
should be
```python
def sample(self, sample_shape=torch.Size()):
y = self._gen.sample(sample_shape=sample_shape + self.batch_shape).detach()
z = y.mul(2).add(-1.0)
return _vector_to_l_cholesky(z)
```
</issue>
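
A standalone illustration of why the operand order matters, using only `torch.distributions.Beta` (the same generator `LKJCorrCholesky` draws from). This is a sketch with made-up shapes, not the pyro code itself; the fix that was eventually merged (see the diff at the end of this record) also wraps the draw in `torch.no_grad()` instead of calling `.detach()` afterwards.

```python
# Sketch: torch.Size concatenation decides whether sample dims lead or trail.
import torch
from torch.distributions import Beta

batch_shape, sample_shape = torch.Size([12]), torch.Size([4])
gen = Beta(torch.ones(3), torch.ones(3))   # stands in for self._gen (vector size 3)

print(gen.sample(batch_shape + sample_shape).shape)  # torch.Size([12, 4, 3]): batch dims lead (the bug)
print(gen.sample(sample_shape + batch_shape).shape)  # torch.Size([4, 12, 3]): sample dims lead, the expected ordering
```
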
<code>
[start of pyro/distributions/lkj.py]
1 # Copyright (c) 2017-2019 Uber Technologies, Inc.
2 # SPDX-License-Identifier: Apache-2.0
3
4 import math
5
6 import torch
7 from torch.distributions import constraints
8
9 from pyro.distributions.constraints import corr_cholesky_constraint
10 from pyro.distributions.torch import Beta
11 from pyro.distributions.torch_distribution import TorchDistribution
12 from pyro.distributions.transforms.cholesky import _vector_to_l_cholesky
13
14
15 # TODO: Modify class to support more than one eta value at a time?
16 class LKJCorrCholesky(TorchDistribution):
17 """
18 Generates cholesky factors of correlation matrices using an LKJ prior.
19
20 The expected use is to combine it with a vector of variances and pass it
21 to the scale_tril parameter of a multivariate distribution such as MultivariateNormal.
22
23 E.g., if theta is a (positive) vector of covariances with the same dimensionality
24 as this distribution, and Omega is sampled from this distribution,
25 scale_tril=torch.mm(torch.diag(sqrt(theta)), Omega)
26
27 Note that the `event_shape` of this distribution is `[d, d]`
28
29 .. note::
30
31 When using this distribution with HMC/NUTS, it is important to
32 use a `step_size` such as 1e-4. If not, you are likely to experience LAPACK
33 errors regarding positive-definiteness.
34
35 For example usage, refer to
36 `pyro/examples/lkj.py <https://github.com/pyro-ppl/pyro/blob/dev/examples/lkj.py>`_.
37
38 :param int d: Dimensionality of the matrix
39 :param torch.Tensor eta: A single positive number parameterizing the distribution.
40 """
41 arg_constraints = {"eta": constraints.positive}
42 support = corr_cholesky_constraint
43 has_rsample = False
44
45 def __init__(self, d, eta, validate_args=None):
46 if eta.numel() != 1:
47 raise ValueError("eta must be a single number; for a larger batch size, call expand")
48 if d <= 1:
49 raise ValueError("d must be > 1 in any correlation matrix")
50 eta = eta.squeeze()
51 vector_size = (d * (d - 1)) // 2
52 alpha = eta.add(0.5 * (d - 1.0))
53
54 concentrations = torch.empty(vector_size, dtype=eta.dtype, device=eta.device)
55 i = 0
56 for k in range(d - 1):
57 alpha -= .5
58 concentrations[..., i:(i + d - k - 1)] = alpha
59 i += d - k - 1
60 self._gen = Beta(concentrations, concentrations)
61 self.eta = eta
62 self._d = d
63 self._lkj_constant = None
64 super().__init__(torch.Size(), torch.Size((d, d)), validate_args=validate_args)
65
66 def sample(self, sample_shape=torch.Size()):
67 y = self._gen.sample(sample_shape=self.batch_shape + sample_shape).detach()
68 z = y.mul(2).add(-1.0)
69 return _vector_to_l_cholesky(z)
70
71 def expand(self, batch_shape, _instance=None):
72 new = self._get_checked_instance(LKJCorrCholesky, _instance)
73 batch_shape = torch.Size(batch_shape)
74 new._gen = self._gen
75 new.eta = self.eta
76 new._d = self._d
77 new._lkj_constant = self._lkj_constant
78 super(LKJCorrCholesky, new).__init__(batch_shape, self.event_shape, validate_args=False)
79 new._validate_args = self._validate_args
80 return new
81
82 def lkj_constant(self, eta, K):
83 if self._lkj_constant is not None:
84 return self._lkj_constant
85
86 Km1 = K - 1
87
88 constant = torch.lgamma(eta.add(0.5 * Km1)).mul(Km1)
89
90 k = torch.linspace(start=1, end=Km1, steps=Km1, dtype=eta.dtype, device=eta.device)
91 constant -= (k.mul(math.log(math.pi) * 0.5) + torch.lgamma(eta.add(0.5 * (Km1 - k)))).sum()
92
93 self._lkj_constant = constant
94 return constant
95
96 def log_prob(self, x):
97 if self._validate_args:
98 self._validate_sample(x)
99
100 eta = self.eta
101
102 lp = self.lkj_constant(eta, self._d)
103
104 Km1 = self._d - 1
105
106 log_diagonals = x.diagonal(offset=0, dim1=-1, dim2=-2)[..., 1:].log()
107 # TODO: Figure out why the `device` kwarg to torch.linspace seems to not work in certain situations,
108 # and a seemingly redundant .to(x.device) is needed below.
109 values = log_diagonals * torch.linspace(start=Km1 - 1, end=0, steps=Km1,
110 dtype=x.dtype,
111 device=x.device).expand_as(log_diagonals).to(x.device)
112
113 values += log_diagonals.mul(eta.mul(2).add(-2.0))
114 values = values.sum(-1) + lp
115 values, _ = torch.broadcast_tensors(values, torch.empty(self.batch_shape))
116 return values
117
[end of pyro/distributions/lkj.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyro/distributions/lkj.py b/pyro/distributions/lkj.py
--- a/pyro/distributions/lkj.py
+++ b/pyro/distributions/lkj.py
@@ -64,7 +64,8 @@
super().__init__(torch.Size(), torch.Size((d, d)), validate_args=validate_args)
def sample(self, sample_shape=torch.Size()):
- y = self._gen.sample(sample_shape=self.batch_shape + sample_shape).detach()
+ with torch.no_grad():
+ y = self._gen.sample(sample_shape=sample_shape + self.batch_shape)
z = y.mul(2).add(-1.0)
return _vector_to_l_cholesky(z)
| {"golden_diff": "diff --git a/pyro/distributions/lkj.py b/pyro/distributions/lkj.py\n--- a/pyro/distributions/lkj.py\n+++ b/pyro/distributions/lkj.py\n@@ -64,7 +64,8 @@\n super().__init__(torch.Size(), torch.Size((d, d)), validate_args=validate_args)\n \n def sample(self, sample_shape=torch.Size()):\n- y = self._gen.sample(sample_shape=self.batch_shape + sample_shape).detach()\n+ with torch.no_grad():\n+ y = self._gen.sample(sample_shape=sample_shape + self.batch_shape)\n z = y.mul(2).add(-1.0)\n return _vector_to_l_cholesky(z)\n", "issue": "[bug] Samples from LKJCorrCholesky have the wrong shape\n### Issue Description\r\nThe batch and sample shapes for `LKJCorrCholesky` are swapped when sampling.\r\n\r\n### Environment\r\nOS: Ubuntu 16.04\r\nPython: 3.7.2\r\nTorch version: 1.6.0\r\nPyro version: 1.4.0\r\n\r\n### Code Snippet\r\nMinimal example:\r\n\r\n```python\r\n>>> dist = pyro.distributions.LKJCorrCholesky(d=3, eta=torch.ones(())).expand(12)\r\n>>> # batch shape and event shape are as you'd expect\r\n>>> dist.batch_shape\r\ntorch.Size([12])\r\n>>> dist.event_shape\r\ntorch.Size([3, 3])\r\n>>> # samples have correct shape when sample_shape=()\r\n>>> dist.shape(())\r\ntorch.Size([12, 3, 3])\r\n>>> dist.sample().shape\r\ntorch.Size([12, 3, 3])\r\n>>> # samples have the wrong shape when sample_shape is non-unit\r\n>>> dist.shape((4,)) # as expected\r\ntorch.Size([4, 12, 3, 3])\r\n>>> dist.sample((4,)).shape\r\ntorch.Size([12, 4, 3, 3])\r\n```\r\n\r\nI think this line is the culprit: https://github.com/pyro-ppl/pyro/blob/7c2c22c10dffda8a33ffbd593cc8d58819959e40/pyro/distributions/lkj.py#L67\r\n\r\n```python\r\n def sample(self, sample_shape=torch.Size()):\r\n y = self._gen.sample(sample_shape=self.batch_shape + sample_shape).detach()\r\n z = y.mul(2).add(-1.0)\r\n return _vector_to_l_cholesky(z)\r\n```\r\n\r\nshould be \r\n\r\n```python\r\n def sample(self, sample_shape=torch.Size()):\r\n y = self._gen.sample(sample_shape=sample_shape + self.batch_shape).detach()\r\n z = y.mul(2).add(-1.0)\r\n return _vector_to_l_cholesky(z)\r\n```\r\n\n", "before_files": [{"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport math\n\nimport torch\nfrom torch.distributions import constraints\n\nfrom pyro.distributions.constraints import corr_cholesky_constraint\nfrom pyro.distributions.torch import Beta\nfrom pyro.distributions.torch_distribution import TorchDistribution\nfrom pyro.distributions.transforms.cholesky import _vector_to_l_cholesky\n\n\n# TODO: Modify class to support more than one eta value at a time?\nclass LKJCorrCholesky(TorchDistribution):\n \"\"\"\n Generates cholesky factors of correlation matrices using an LKJ prior.\n\n The expected use is to combine it with a vector of variances and pass it\n to the scale_tril parameter of a multivariate distribution such as MultivariateNormal.\n\n E.g., if theta is a (positive) vector of covariances with the same dimensionality\n as this distribution, and Omega is sampled from this distribution,\n scale_tril=torch.mm(torch.diag(sqrt(theta)), Omega)\n\n Note that the `event_shape` of this distribution is `[d, d]`\n\n .. note::\n\n When using this distribution with HMC/NUTS, it is important to\n use a `step_size` such as 1e-4. 
If not, you are likely to experience LAPACK\n errors regarding positive-definiteness.\n\n For example usage, refer to\n `pyro/examples/lkj.py <https://github.com/pyro-ppl/pyro/blob/dev/examples/lkj.py>`_.\n\n :param int d: Dimensionality of the matrix\n :param torch.Tensor eta: A single positive number parameterizing the distribution.\n \"\"\"\n arg_constraints = {\"eta\": constraints.positive}\n support = corr_cholesky_constraint\n has_rsample = False\n\n def __init__(self, d, eta, validate_args=None):\n if eta.numel() != 1:\n raise ValueError(\"eta must be a single number; for a larger batch size, call expand\")\n if d <= 1:\n raise ValueError(\"d must be > 1 in any correlation matrix\")\n eta = eta.squeeze()\n vector_size = (d * (d - 1)) // 2\n alpha = eta.add(0.5 * (d - 1.0))\n\n concentrations = torch.empty(vector_size, dtype=eta.dtype, device=eta.device)\n i = 0\n for k in range(d - 1):\n alpha -= .5\n concentrations[..., i:(i + d - k - 1)] = alpha\n i += d - k - 1\n self._gen = Beta(concentrations, concentrations)\n self.eta = eta\n self._d = d\n self._lkj_constant = None\n super().__init__(torch.Size(), torch.Size((d, d)), validate_args=validate_args)\n\n def sample(self, sample_shape=torch.Size()):\n y = self._gen.sample(sample_shape=self.batch_shape + sample_shape).detach()\n z = y.mul(2).add(-1.0)\n return _vector_to_l_cholesky(z)\n\n def expand(self, batch_shape, _instance=None):\n new = self._get_checked_instance(LKJCorrCholesky, _instance)\n batch_shape = torch.Size(batch_shape)\n new._gen = self._gen\n new.eta = self.eta\n new._d = self._d\n new._lkj_constant = self._lkj_constant\n super(LKJCorrCholesky, new).__init__(batch_shape, self.event_shape, validate_args=False)\n new._validate_args = self._validate_args\n return new\n\n def lkj_constant(self, eta, K):\n if self._lkj_constant is not None:\n return self._lkj_constant\n\n Km1 = K - 1\n\n constant = torch.lgamma(eta.add(0.5 * Km1)).mul(Km1)\n\n k = torch.linspace(start=1, end=Km1, steps=Km1, dtype=eta.dtype, device=eta.device)\n constant -= (k.mul(math.log(math.pi) * 0.5) + torch.lgamma(eta.add(0.5 * (Km1 - k)))).sum()\n\n self._lkj_constant = constant\n return constant\n\n def log_prob(self, x):\n if self._validate_args:\n self._validate_sample(x)\n\n eta = self.eta\n\n lp = self.lkj_constant(eta, self._d)\n\n Km1 = self._d - 1\n\n log_diagonals = x.diagonal(offset=0, dim1=-1, dim2=-2)[..., 1:].log()\n # TODO: Figure out why the `device` kwarg to torch.linspace seems to not work in certain situations,\n # and a seemingly redundant .to(x.device) is needed below.\n values = log_diagonals * torch.linspace(start=Km1 - 1, end=0, steps=Km1,\n dtype=x.dtype,\n device=x.device).expand_as(log_diagonals).to(x.device)\n\n values += log_diagonals.mul(eta.mul(2).add(-2.0))\n values = values.sum(-1) + lp\n values, _ = torch.broadcast_tensors(values, torch.empty(self.batch_shape))\n return values\n", "path": "pyro/distributions/lkj.py"}]} | 2,449 | 154 |
gh_patches_debug_11622 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-1809 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AWS::ApiGateway::Authorizer - E3008 Property "RestApiId" has no valid Fn::GetAtt options
Given the following template:
```yaml
AWSTemplateFormatVersion: '2010-09-09'
Resources:
ProvisionedProduct:
Type: AWS::ServiceCatalog::CloudFormationProvisionedProduct
Properties:
ProductName: example
ProvisioningArtifactName: v1
Authoriser:
Type: AWS::ApiGateway::Authorizer
Properties:
RestApiId: !GetAtt ProvisionedProduct.Outputs.RestApiId
Type: TOKEN
```
The following error is output when using cfn-lint 0.42.0:
```
E3008 Property "RestApiId" has no valid Fn::GetAtt options at Resources/Authoriser/Properties/RestApiId/Fn::GetAtt
template.yml:13:7
```
</issue>
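
One hedged reading of the failure: `Fn::GetAtt` targets on a provisioned product are dynamic `Outputs.<Name>` values, so they cannot be enumerated by the resource spec and the rule has to skip them, as it already does for nested stacks. A sketch of that skip follows; the function and set names are illustrative and not cfn-lint API.

```python
# Sketch: resource types whose GetAtt attributes are dynamic "Outputs.<Name>"
# values cannot be validated against the spec and should be skipped.
DYNAMIC_OUTPUT_TYPES = {
    'AWS::CloudFormation::Stack',                             # already special-cased
    'AWS::ServiceCatalog::CloudFormationProvisionedProduct',  # the case in this issue
}

def skip_getatt_validation(resource_type, resource_attribute):
    return resource_type in DYNAMIC_OUTPUT_TYPES and resource_attribute[0] == 'Outputs'

print(skip_getatt_validation('AWS::ServiceCatalog::CloudFormationProvisionedProduct',
                             ['Outputs', 'RestApiId']))   # True, so no E3008
```
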
<code>
[start of src/cfnlint/rules/resources/properties/ValueRefGetAtt.py]
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import six
6 from cfnlint.rules import CloudFormationLintRule
7 from cfnlint.rules import RuleMatch
8 from cfnlint.helpers import RESOURCE_SPECS
9 import cfnlint.helpers
10
11
12 class ValueRefGetAtt(CloudFormationLintRule):
13 """Check if Resource Properties are correct"""
14 id = 'E3008'
15 shortdesc = 'Check values of properties for valid Refs and GetAtts'
16 description = 'Checks resource properties for Ref and GetAtt values'
17 tags = ['resources', 'ref', 'getatt']
18
19 def initialize(self, cfn):
20 """Initialize the rule"""
21 for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):
22 self.resource_property_types.append(resource_type_spec)
23 for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):
24 self.resource_sub_property_types.append(property_type_spec)
25
26 def is_value_a_list(self, path, property_name):
27 """
28 Determines if the value checked is a list or a value in a list
29 We need to handle conditions in the path that could be nested, etc.
30 ['Resources', 'LoadBalancer', 'Properties', 'Subnets', 'Fn::If', 2, 'Fn::If', 2]
31 Numbers preceeded by a Fn::If should be removed and check repeated.
32 """
33 if path[-1] != property_name:
34 # Property doesn't match the property name
35 # Check if its a number and a condition
36 if isinstance(path[-1], int) and path[-2] == 'Fn::If':
37 return self.is_value_a_list(path[:-2], property_name)
38
39 return False
40
41 return True
42
43 def check_value_ref(self, value, path, **kwargs):
44 """Check Ref"""
45 matches = list()
46 cfn = kwargs.get('cfn')
47 value_specs = kwargs.get('value_specs', {}).get('Ref')
48 list_value_specs = kwargs.get('list_value_specs', {}).get('Ref')
49 property_type = kwargs.get('property_type')
50 property_name = kwargs.get('property_name')
51 if path[-1] == 'Ref' and property_type == 'List' and self.is_value_a_list(path[:-1], property_name):
52 specs = list_value_specs
53 else:
54 specs = value_specs
55
56 if not specs:
57 # If no Ref's are specified, just skip
58 # Opposite of GetAtt you will always have a Ref to a Parameter so if this is
59 # None it just hasn't been defined and we can skip
60 return matches
61
62 if value in cfn.template.get('Parameters', {}):
63 param = cfn.template.get('Parameters').get(value, {})
64 parameter_type = param.get('Type')
65 valid_parameter_types = []
66 for parameter in specs.get('Parameters'):
67 for param_type in RESOURCE_SPECS.get(cfn.regions[0]).get('ParameterTypes').get(parameter):
68 valid_parameter_types.append(param_type)
69
70 if not specs.get('Parameters'):
71 message = 'Property "{0}" has no valid Refs to Parameters at {1}'
72 matches.append(RuleMatch(path, message.format(
73 property_name, '/'.join(map(str, path)))))
74 elif parameter_type not in valid_parameter_types:
75 message = 'Property "{0}" can Ref to parameter of types [{1}] at {2}'
76 matches.append(
77 RuleMatch(
78 path,
79 message.format(
80 property_name,
81 ', '.join(map(str, valid_parameter_types)),
82 '/'.join(map(str, path)))))
83 if value in cfn.template.get('Resources', {}):
84 resource = cfn.template.get('Resources').get(value, {})
85 resource_type = resource.get('Type')
86 if not specs.get('Resources'):
87 message = 'Property "{0}" has no valid Refs to Resources at {1}'
88 matches.append(RuleMatch(path, message.format(
89 property_name, '/'.join(map(str, path)))))
90 elif resource_type not in specs.get('Resources'):
91 message = 'Property "{0}" can Ref to resources of types [{1}] at {2}'
92 matches.append(
93 RuleMatch(
94 path,
95 message.format(
96 property_name,
97 ', '.join(map(str, specs.get('Resources'))),
98 '/'.join(map(str, path)))))
99
100 return matches
101
102 def check_value_getatt(self, value, path, **kwargs):
103 """Check GetAtt"""
104 matches = []
105 cfn = kwargs.get('cfn')
106 value_specs = kwargs.get('value_specs', {}).get('GetAtt')
107 list_value_specs = kwargs.get('list_value_specs', {}).get('GetAtt')
108 property_type = kwargs.get('property_type')
109 property_name = kwargs.get('property_name')
110 # You can sometimes get a list or a string with . in it
111 if isinstance(value, list):
112 resource_name = value[0]
113 if len(value[1:]) == 1:
114 resource_attribute = value[1].split('.')
115 else:
116 resource_attribute = value[1:]
117 elif isinstance(value, six.string_types):
118 resource_name = value.split('.')[0]
119 resource_attribute = value.split('.')[1:]
120 is_value_a_list = self.is_value_a_list(path[:-1], property_name)
121 if path[-1] == 'Fn::GetAtt' and property_type == 'List' and is_value_a_list:
122 specs = list_value_specs
123 else:
124 specs = value_specs
125
126 resource_type = cfn.template.get('Resources', {}).get(resource_name, {}).get('Type')
127
128 if cfnlint.helpers.is_custom_resource(resource_type):
129 # A custom resource voids the spec. Move on
130 return matches
131
132 if resource_type == 'AWS::CloudFormation::Stack' and resource_attribute[0] == 'Outputs':
133 # Nested Stack Outputs
134 # if its a string type we are good and return matches
135 # if its a list its a failure as Outputs can only be strings
136 if is_value_a_list and property_type == 'List':
137 message = 'CloudFormation stack outputs need to be strings not lists at {0}'
138 matches.append(RuleMatch(path, message.format('/'.join(map(str, path)))))
139
140 return matches
141
142 if specs is None:
143 # GetAtt specs aren't specified skip
144 return matches
145 if not specs:
146 # GetAtt is specified but empty so there are no valid options
147 message = 'Property "{0}" has no valid Fn::GetAtt options at {1}'
148 matches.append(RuleMatch(path, message.format(property_name, '/'.join(map(str, path)))))
149 return matches
150
151 if resource_type not in specs:
152 message = 'Property "{0}" can Fn::GetAtt to a resource of types [{1}] at {2}'
153 matches.append(
154 RuleMatch(
155 path,
156 message.format(
157 property_name,
158 ', '.join(map(str, specs)),
159 '/'.join(map(str, path)))))
160 elif '.'.join(map(str, resource_attribute)) != specs[resource_type]:
161 message = 'Property "{0}" can Fn::GetAtt to a resource attribute "{1}" at {2}'
162 matches.append(
163 RuleMatch(
164 path,
165 message.format(
166 property_name,
167 specs[resource_type],
168 '/'.join(map(str, path)))))
169
170 return matches
171
172 def check(self, cfn, properties, value_specs, property_specs, path):
173 """Check itself"""
174 matches = list()
175 for p_value, p_path in properties.items_safe(path[:]):
176 for prop in p_value:
177 if prop in value_specs:
178 value = value_specs.get(prop).get('Value', {})
179 if value:
180 value_type = value.get('ValueType', '')
181 list_value_type = value.get('ListValueType', '')
182 property_type = property_specs.get('Properties').get(prop).get('Type')
183 matches.extend(
184 cfn.check_value(
185 p_value, prop, p_path,
186 check_ref=self.check_value_ref,
187 check_get_att=self.check_value_getatt,
188 value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get(
189 'ValueTypes').get(value_type, {}),
190 list_value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get(
191 'ValueTypes').get(list_value_type, {}),
192 cfn=cfn, property_type=property_type, property_name=prop
193 )
194 )
195
196 return matches
197
198 def match_resource_sub_properties(self, properties, property_type, path, cfn):
199 """Match for sub properties"""
200 matches = list()
201
202 specs = RESOURCE_SPECS.get(cfn.regions[0]).get(
203 'PropertyTypes').get(property_type, {}).get('Properties', {})
204 property_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type)
205 matches.extend(self.check(cfn, properties, specs, property_specs, path))
206
207 return matches
208
209 def match_resource_properties(self, properties, resource_type, path, cfn):
210 """Check CloudFormation Properties"""
211 matches = list()
212
213 specs = RESOURCE_SPECS.get(cfn.regions[0]).get(
214 'ResourceTypes').get(resource_type, {}).get('Properties', {})
215 resource_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type)
216 matches.extend(self.check(cfn, properties, specs, resource_specs, path))
217
218 return matches
219
[end of src/cfnlint/rules/resources/properties/ValueRefGetAtt.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/rules/resources/properties/ValueRefGetAtt.py b/src/cfnlint/rules/resources/properties/ValueRefGetAtt.py
--- a/src/cfnlint/rules/resources/properties/ValueRefGetAtt.py
+++ b/src/cfnlint/rules/resources/properties/ValueRefGetAtt.py
@@ -129,7 +129,7 @@
# A custom resource voids the spec. Move on
return matches
- if resource_type == 'AWS::CloudFormation::Stack' and resource_attribute[0] == 'Outputs':
+ if resource_type in ['AWS::CloudFormation::Stack', 'AWS::ServiceCatalog::CloudFormationProvisionedProduct'] and resource_attribute[0] == 'Outputs':
# Nested Stack Outputs
# if its a string type we are good and return matches
# if its a list its a failure as Outputs can only be strings
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/properties/ValueRefGetAtt.py b/src/cfnlint/rules/resources/properties/ValueRefGetAtt.py\n--- a/src/cfnlint/rules/resources/properties/ValueRefGetAtt.py\n+++ b/src/cfnlint/rules/resources/properties/ValueRefGetAtt.py\n@@ -129,7 +129,7 @@\n # A custom resource voids the spec. Move on\n return matches\n \n- if resource_type == 'AWS::CloudFormation::Stack' and resource_attribute[0] == 'Outputs':\n+ if resource_type in ['AWS::CloudFormation::Stack', 'AWS::ServiceCatalog::CloudFormationProvisionedProduct'] and resource_attribute[0] == 'Outputs':\n # Nested Stack Outputs\n # if its a string type we are good and return matches\n # if its a list its a failure as Outputs can only be strings\n", "issue": "AWS::ApiGateway::Authorizer - E3008 Property \"RestApiId\" has no valid Fn::GetAtt options\nGiven the following template:\r\n\r\n```yaml\r\nAWSTemplateFormatVersion: '2010-09-09'\r\n\r\nResources:\r\n ProvisionedProduct:\r\n Type: AWS::ServiceCatalog::CloudFormationProvisionedProduct\r\n Properties:\r\n ProductName: example\r\n ProvisioningArtifactName: v1\r\n\r\n Authoriser:\r\n Type: AWS::ApiGateway::Authorizer\r\n Properties:\r\n RestApiId: !GetAtt ProvisionedProduct.Outputs.RestApiId\r\n Type: TOKEN\r\n```\r\n\r\nThe following error is output when using cfn-lint 0.42.0:\r\n\r\n```\r\nE3008 Property \"RestApiId\" has no valid Fn::GetAtt options at Resources/Authoriser/Properties/RestApiId/Fn::GetAtt\r\ntemplate.yml:13:7\r\n```\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport six\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\nfrom cfnlint.helpers import RESOURCE_SPECS\nimport cfnlint.helpers\n\n\nclass ValueRefGetAtt(CloudFormationLintRule):\n \"\"\"Check if Resource Properties are correct\"\"\"\n id = 'E3008'\n shortdesc = 'Check values of properties for valid Refs and GetAtts'\n description = 'Checks resource properties for Ref and GetAtt values'\n tags = ['resources', 'ref', 'getatt']\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):\n self.resource_property_types.append(resource_type_spec)\n for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):\n self.resource_sub_property_types.append(property_type_spec)\n\n def is_value_a_list(self, path, property_name):\n \"\"\"\n Determines if the value checked is a list or a value in a list\n We need to handle conditions in the path that could be nested, etc.\n ['Resources', 'LoadBalancer', 'Properties', 'Subnets', 'Fn::If', 2, 'Fn::If', 2]\n Numbers preceeded by a Fn::If should be removed and check repeated.\n \"\"\"\n if path[-1] != property_name:\n # Property doesn't match the property name\n # Check if its a number and a condition\n if isinstance(path[-1], int) and path[-2] == 'Fn::If':\n return self.is_value_a_list(path[:-2], property_name)\n\n return False\n\n return True\n\n def check_value_ref(self, value, path, **kwargs):\n \"\"\"Check Ref\"\"\"\n matches = list()\n cfn = kwargs.get('cfn')\n value_specs = kwargs.get('value_specs', {}).get('Ref')\n list_value_specs = kwargs.get('list_value_specs', {}).get('Ref')\n property_type = kwargs.get('property_type')\n property_name = kwargs.get('property_name')\n if path[-1] == 'Ref' and property_type == 'List' and self.is_value_a_list(path[:-1], property_name):\n specs = 
list_value_specs\n else:\n specs = value_specs\n\n if not specs:\n # If no Ref's are specified, just skip\n # Opposite of GetAtt you will always have a Ref to a Parameter so if this is\n # None it just hasn't been defined and we can skip\n return matches\n\n if value in cfn.template.get('Parameters', {}):\n param = cfn.template.get('Parameters').get(value, {})\n parameter_type = param.get('Type')\n valid_parameter_types = []\n for parameter in specs.get('Parameters'):\n for param_type in RESOURCE_SPECS.get(cfn.regions[0]).get('ParameterTypes').get(parameter):\n valid_parameter_types.append(param_type)\n\n if not specs.get('Parameters'):\n message = 'Property \"{0}\" has no valid Refs to Parameters at {1}'\n matches.append(RuleMatch(path, message.format(\n property_name, '/'.join(map(str, path)))))\n elif parameter_type not in valid_parameter_types:\n message = 'Property \"{0}\" can Ref to parameter of types [{1}] at {2}'\n matches.append(\n RuleMatch(\n path,\n message.format(\n property_name,\n ', '.join(map(str, valid_parameter_types)),\n '/'.join(map(str, path)))))\n if value in cfn.template.get('Resources', {}):\n resource = cfn.template.get('Resources').get(value, {})\n resource_type = resource.get('Type')\n if not specs.get('Resources'):\n message = 'Property \"{0}\" has no valid Refs to Resources at {1}'\n matches.append(RuleMatch(path, message.format(\n property_name, '/'.join(map(str, path)))))\n elif resource_type not in specs.get('Resources'):\n message = 'Property \"{0}\" can Ref to resources of types [{1}] at {2}'\n matches.append(\n RuleMatch(\n path,\n message.format(\n property_name,\n ', '.join(map(str, specs.get('Resources'))),\n '/'.join(map(str, path)))))\n\n return matches\n\n def check_value_getatt(self, value, path, **kwargs):\n \"\"\"Check GetAtt\"\"\"\n matches = []\n cfn = kwargs.get('cfn')\n value_specs = kwargs.get('value_specs', {}).get('GetAtt')\n list_value_specs = kwargs.get('list_value_specs', {}).get('GetAtt')\n property_type = kwargs.get('property_type')\n property_name = kwargs.get('property_name')\n # You can sometimes get a list or a string with . in it\n if isinstance(value, list):\n resource_name = value[0]\n if len(value[1:]) == 1:\n resource_attribute = value[1].split('.')\n else:\n resource_attribute = value[1:]\n elif isinstance(value, six.string_types):\n resource_name = value.split('.')[0]\n resource_attribute = value.split('.')[1:]\n is_value_a_list = self.is_value_a_list(path[:-1], property_name)\n if path[-1] == 'Fn::GetAtt' and property_type == 'List' and is_value_a_list:\n specs = list_value_specs\n else:\n specs = value_specs\n\n resource_type = cfn.template.get('Resources', {}).get(resource_name, {}).get('Type')\n\n if cfnlint.helpers.is_custom_resource(resource_type):\n # A custom resource voids the spec. 
Move on\n return matches\n\n if resource_type == 'AWS::CloudFormation::Stack' and resource_attribute[0] == 'Outputs':\n # Nested Stack Outputs\n # if its a string type we are good and return matches\n # if its a list its a failure as Outputs can only be strings\n if is_value_a_list and property_type == 'List':\n message = 'CloudFormation stack outputs need to be strings not lists at {0}'\n matches.append(RuleMatch(path, message.format('/'.join(map(str, path)))))\n\n return matches\n\n if specs is None:\n # GetAtt specs aren't specified skip\n return matches\n if not specs:\n # GetAtt is specified but empty so there are no valid options\n message = 'Property \"{0}\" has no valid Fn::GetAtt options at {1}'\n matches.append(RuleMatch(path, message.format(property_name, '/'.join(map(str, path)))))\n return matches\n\n if resource_type not in specs:\n message = 'Property \"{0}\" can Fn::GetAtt to a resource of types [{1}] at {2}'\n matches.append(\n RuleMatch(\n path,\n message.format(\n property_name,\n ', '.join(map(str, specs)),\n '/'.join(map(str, path)))))\n elif '.'.join(map(str, resource_attribute)) != specs[resource_type]:\n message = 'Property \"{0}\" can Fn::GetAtt to a resource attribute \"{1}\" at {2}'\n matches.append(\n RuleMatch(\n path,\n message.format(\n property_name,\n specs[resource_type],\n '/'.join(map(str, path)))))\n\n return matches\n\n def check(self, cfn, properties, value_specs, property_specs, path):\n \"\"\"Check itself\"\"\"\n matches = list()\n for p_value, p_path in properties.items_safe(path[:]):\n for prop in p_value:\n if prop in value_specs:\n value = value_specs.get(prop).get('Value', {})\n if value:\n value_type = value.get('ValueType', '')\n list_value_type = value.get('ListValueType', '')\n property_type = property_specs.get('Properties').get(prop).get('Type')\n matches.extend(\n cfn.check_value(\n p_value, prop, p_path,\n check_ref=self.check_value_ref,\n check_get_att=self.check_value_getatt,\n value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get(\n 'ValueTypes').get(value_type, {}),\n list_value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get(\n 'ValueTypes').get(list_value_type, {}),\n cfn=cfn, property_type=property_type, property_name=prop\n )\n )\n\n return matches\n\n def match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = list()\n\n specs = RESOURCE_SPECS.get(cfn.regions[0]).get(\n 'PropertyTypes').get(property_type, {}).get('Properties', {})\n property_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type)\n matches.extend(self.check(cfn, properties, specs, property_specs, path))\n\n return matches\n\n def match_resource_properties(self, properties, resource_type, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = list()\n\n specs = RESOURCE_SPECS.get(cfn.regions[0]).get(\n 'ResourceTypes').get(resource_type, {}).get('Properties', {})\n resource_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type)\n matches.extend(self.check(cfn, properties, specs, resource_specs, path))\n\n return matches\n", "path": "src/cfnlint/rules/resources/properties/ValueRefGetAtt.py"}]} | 3,350 | 198 |
gh_patches_debug_12157 | rasdani/github-patches | git_diff | pyro-ppl__pyro-198 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pytorch broadcasting
In various places in the codebase we have tensor ops like `expand_as()`. Given the extended broadcasting functionality in the latest version of pytorch, some of these may be unnecessary and/or clunky. More generally, we should investigate and deal with any pytorch warnings that popped up once we switched pytorch versions. For example:
_UserWarning: other is not broadcastable to self, but they have the same number of elements. Falling back to deprecated pointwise behavior._
</issue>
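
A self-contained sketch of the mismatch behind that warning; the shapes are taken from the MNIST example below, and the exact warning text depends on the torch version in use.

```python
# Same number of elements, different shapes: the deprecated pointwise fallback
# the warning complains about. Reshaping makes the broadcast explicit.
import torch

mu = torch.rand(1, 784)      # Bernoulli mean over flattened pixels
datum = torch.rand(28, 28)   # raw image: 784 elements, but shape (28, 28)

datum = datum.view(1, -1)    # now (1, 784); elementwise ops with mu broadcast cleanly
print((mu * datum).shape)    # torch.Size([1, 784])
```
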
<code>
[start of examples/categorical_bmm.py]
1 import argparse
2
3 import numpy as np
4 import torch
5 import torchvision.datasets as dset
6 import torchvision.transforms as transforms
7 import visdom
8 from torch.autograd import Variable
9 from torch.nn import Softmax
10
11 import pyro
12 from pyro.distributions import Bernoulli, Categorical
13 from pyro.infer.kl_qp import KL_QP
14
15 mnist = dset.MNIST(
16 root='./data',
17 train=True,
18 transform=None,
19 target_transform=None,
20 download=True)
21 print('dataset loaded')
22
23 softmax = Softmax()
24
25 train_loader = torch.utils.data.DataLoader(
26 dset.MNIST('../data', train=True, download=True,
27 transform=transforms.Compose([
28 transforms.ToTensor(),
29 transforms.Normalize((0.1307,), (0.3081,))
30 ])),
31 batch_size=128, shuffle=True)
32 test_loader = torch.utils.data.DataLoader(
33 dset.MNIST('../data', train=False, transform=transforms.Compose([
34 transforms.ToTensor(),
35 transforms.Normalize((0.1307,), (0.3081,))
36 ])),
37 batch_size=128, shuffle=True)
38
39
40 def local_model(i, datum):
41 beta = Variable(torch.ones(1, 10)) * 0.1
42 cll = pyro.sample("class_of_datum_" + str(i), Categorical(beta))
43 mean_param = Variable(torch.zeros(1, 784), requires_grad=True)
44 # do MLE for class means
45 mu = pyro.param("mean_of_class_" + str(cll[0]), mean_param)
46 mu_param = softmax(mu)
47 pyro.observe("obs_" + str(i), Bernoulli(mu_param), datum)
48 return cll
49
50
51 def local_guide(i, datum):
52 alpha = torch.ones(1, 10) * 0.1
53 beta_q = Variable(alpha, requires_grad=True)
54 beta_param = pyro.param("class_posterior_", beta_q)
55 guide_params = softmax(beta_param)
56 cll = pyro.sample("class_of_datum_" + str(i), Categorical(guide_params))
57 return cll
58
59
60 def inspect_posterior_samples(i):
61 cll = local_guide(i, None)
62 mean_param = Variable(torch.zeros(1, 784), requires_grad=True)
63 # do MLE for class means
64 mu = pyro.param("mean_of_class_" + str(cll[0]), mean_param)
65 dat = pyro.sample("obs_" + str(i), Bernoulli(mu))
66 return dat
67
68
69 optim_fct = pyro.optim(torch.optim.Adam, {'lr': .0001})
70
71 inference = KL_QP(local_model, local_guide, optim_fct)
72
73 vis = visdom.Visdom()
74
75 nr_epochs = 50
76 # apply it to minibatches of data by hand:
77
78 mnist_data = Variable(train_loader.dataset.train_data.float() / 255.)
79 mnist_labels = Variable(train_loader.dataset.train_labels)
80 mnist_size = mnist_data.size(0)
81 batch_size = 1 # 64
82
83 all_batches = np.arange(0, mnist_size, batch_size)
84
85 if all_batches[-1] != mnist_size:
86 all_batches = list(all_batches) + [mnist_size]
87
88
89 def main():
90 parser = argparse.ArgumentParser(description="parse args")
91 parser.add_argument('-n', '--num-epochs', nargs='?', default=1000, type=int)
92 args = parser.parse_args()
93 for i in range(args.num_epochs):
94 epoch_loss = 0.
95 for ix, batch_start in enumerate(all_batches[:-1]):
96 batch_end = all_batches[ix + 1]
97 batch_data = mnist_data[batch_start:batch_end]
98 bs_size = batch_data.size(0)
99 batch_class_raw = mnist_labels[batch_start:batch_end]
100 batch_class = torch.zeros(bs_size, 10) # maybe it needs a FloatTensor
101 batch_class.scatter_(1, batch_class_raw.data.view(-1, 1), 1)
102 batch_class = Variable(batch_class)
103 epoch_loss += inference.step(ix, batch_data)
104
105 # optional visualization!
106 # vis.image(batch_data[0].view(28, 28).data.numpy())
107 # vis.image(sample[0].view(28, 28).data.numpy())
108 # vis.image(sample_mu[0].view(28, 28).data.numpy())
109 print("epoch avg loss {}".format(epoch_loss / float(mnist_size)))
110
111
112 if __name__ == '__main__':
113 main()
114
[end of examples/categorical_bmm.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/categorical_bmm.py b/examples/categorical_bmm.py
--- a/examples/categorical_bmm.py
+++ b/examples/categorical_bmm.py
@@ -12,6 +12,7 @@
from pyro.distributions import Bernoulli, Categorical
from pyro.infer.kl_qp import KL_QP
+
mnist = dset.MNIST(
root='./data',
train=True,
@@ -44,7 +45,7 @@
# do MLE for class means
mu = pyro.param("mean_of_class_" + str(cll[0]), mean_param)
mu_param = softmax(mu)
- pyro.observe("obs_" + str(i), Bernoulli(mu_param), datum)
+ pyro.observe("obs_" + str(i), Bernoulli(mu_param), datum.view(1, -1))
return cll
| {"golden_diff": "diff --git a/examples/categorical_bmm.py b/examples/categorical_bmm.py\n--- a/examples/categorical_bmm.py\n+++ b/examples/categorical_bmm.py\n@@ -12,6 +12,7 @@\n from pyro.distributions import Bernoulli, Categorical\n from pyro.infer.kl_qp import KL_QP\n \n+\n mnist = dset.MNIST(\n root='./data',\n train=True,\n@@ -44,7 +45,7 @@\n # do MLE for class means\n mu = pyro.param(\"mean_of_class_\" + str(cll[0]), mean_param)\n mu_param = softmax(mu)\n- pyro.observe(\"obs_\" + str(i), Bernoulli(mu_param), datum)\n+ pyro.observe(\"obs_\" + str(i), Bernoulli(mu_param), datum.view(1, -1))\n return cll\n", "issue": "pytorch broadcasting\nin various places in the codebase we have tensor ops like `expand_as()`. given the extended broadcasting functionality in the latest version of pytorch, some of these may be unnecessary and/or clunky. more generally, we should investigate and deal with any pytorch warnings that popped once once we switched pytorch versions. for example: \r\n\r\n_UserWarning: other is not broadcastable to self, but they have the same number of elements. Falling back to deprecated pointwise behavior._\n", "before_files": [{"content": "import argparse\n\nimport numpy as np\nimport torch\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport visdom\nfrom torch.autograd import Variable\nfrom torch.nn import Softmax\n\nimport pyro\nfrom pyro.distributions import Bernoulli, Categorical\nfrom pyro.infer.kl_qp import KL_QP\n\nmnist = dset.MNIST(\n root='./data',\n train=True,\n transform=None,\n target_transform=None,\n download=True)\nprint('dataset loaded')\n\nsoftmax = Softmax()\n\ntrain_loader = torch.utils.data.DataLoader(\n dset.MNIST('../data', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=128, shuffle=True)\ntest_loader = torch.utils.data.DataLoader(\n dset.MNIST('../data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=128, shuffle=True)\n\n\ndef local_model(i, datum):\n beta = Variable(torch.ones(1, 10)) * 0.1\n cll = pyro.sample(\"class_of_datum_\" + str(i), Categorical(beta))\n mean_param = Variable(torch.zeros(1, 784), requires_grad=True)\n # do MLE for class means\n mu = pyro.param(\"mean_of_class_\" + str(cll[0]), mean_param)\n mu_param = softmax(mu)\n pyro.observe(\"obs_\" + str(i), Bernoulli(mu_param), datum)\n return cll\n\n\ndef local_guide(i, datum):\n alpha = torch.ones(1, 10) * 0.1\n beta_q = Variable(alpha, requires_grad=True)\n beta_param = pyro.param(\"class_posterior_\", beta_q)\n guide_params = softmax(beta_param)\n cll = pyro.sample(\"class_of_datum_\" + str(i), Categorical(guide_params))\n return cll\n\n\ndef inspect_posterior_samples(i):\n cll = local_guide(i, None)\n mean_param = Variable(torch.zeros(1, 784), requires_grad=True)\n # do MLE for class means\n mu = pyro.param(\"mean_of_class_\" + str(cll[0]), mean_param)\n dat = pyro.sample(\"obs_\" + str(i), Bernoulli(mu))\n return dat\n\n\noptim_fct = pyro.optim(torch.optim.Adam, {'lr': .0001})\n\ninference = KL_QP(local_model, local_guide, optim_fct)\n\nvis = visdom.Visdom()\n\nnr_epochs = 50\n# apply it to minibatches of data by hand:\n\nmnist_data = Variable(train_loader.dataset.train_data.float() / 255.)\nmnist_labels = Variable(train_loader.dataset.train_labels)\nmnist_size = mnist_data.size(0)\nbatch_size = 1 # 64\n\nall_batches = np.arange(0, mnist_size, 
batch_size)\n\nif all_batches[-1] != mnist_size:\n all_batches = list(all_batches) + [mnist_size]\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"parse args\")\n parser.add_argument('-n', '--num-epochs', nargs='?', default=1000, type=int)\n args = parser.parse_args()\n for i in range(args.num_epochs):\n epoch_loss = 0.\n for ix, batch_start in enumerate(all_batches[:-1]):\n batch_end = all_batches[ix + 1]\n batch_data = mnist_data[batch_start:batch_end]\n bs_size = batch_data.size(0)\n batch_class_raw = mnist_labels[batch_start:batch_end]\n batch_class = torch.zeros(bs_size, 10) # maybe it needs a FloatTensor\n batch_class.scatter_(1, batch_class_raw.data.view(-1, 1), 1)\n batch_class = Variable(batch_class)\n epoch_loss += inference.step(ix, batch_data)\n\n # optional visualization!\n # vis.image(batch_data[0].view(28, 28).data.numpy())\n # vis.image(sample[0].view(28, 28).data.numpy())\n # vis.image(sample_mu[0].view(28, 28).data.numpy())\n print(\"epoch avg loss {}\".format(epoch_loss / float(mnist_size)))\n\n\nif __name__ == '__main__':\n main()\n", "path": "examples/categorical_bmm.py"}]} | 1,870 | 192 |
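A minimal standalone sketch of the same shape alignment as the patch above, written against the current PyTorch tensor API rather than the deprecated `Variable` wrapper (the 784/28×28 sizes simply mirror the MNIST dimensions used in the example):

```python
import torch

mu = torch.full((1, 784), 0.5)   # per-pixel Bernoulli means, shape (1, 784)
datum = torch.rand(28, 28)       # one MNIST-sized observation, shape (28, 28)

# Relying on "same number of elements" pointwise behaviour is deprecated;
# flatten the observation so its shape matches the distribution parameter.
obs = datum.view(1, -1)
assert obs.shape == mu.shape     # both are (1, 784)
```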
gh_patches_debug_22576 | rasdani/github-patches | git_diff | google__mobly-799 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Mobly Release 1.11.1
</issue>
<code>
[start of setup.py]
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import platform
16 import setuptools
17 from setuptools.command import test
18 import sys
19
20 install_requires = [
21 'portpicker', 'pyserial', 'pyyaml', 'timeout_decorator', 'typing_extensions'
22 ]
23
24 if platform.system() == 'Windows':
25 install_requires.append('pywin32')
26
27
28 class PyTest(test.test):
29 """Class used to execute unit tests using PyTest. This allows us to execute
30 unit tests without having to install the package.
31 """
32
33 def finalize_options(self):
34 test.test.finalize_options(self)
35 self.test_args = ['-x', "tests/mobly"]
36 self.test_suite = True
37
38 def run_tests(self):
39 import pytest
40 errno = pytest.main(self.test_args)
41 sys.exit(errno)
42
43
44 def main():
45 setuptools.setup(
46 name='mobly',
47 version='1.11',
48 maintainer='Ang Li',
49 maintainer_email='[email protected]',
50 description='Automation framework for special end-to-end test cases',
51 license='Apache2.0',
52 url='https://github.com/google/mobly',
53 download_url='https://github.com/google/mobly/tarball/1.11',
54 packages=setuptools.find_packages(exclude=['tests']),
55 include_package_data=False,
56 scripts=['tools/sl4a_shell.py', 'tools/snippet_shell.py'],
57 tests_require=[
58 'mock',
59 'pytest',
60 'pytz',
61 ],
62 install_requires=install_requires,
63 cmdclass={'test': PyTest},
64 )
65
66
67 if __name__ == '__main__':
68 main()
69
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@
import sys
install_requires = [
- 'portpicker', 'pyserial', 'pyyaml', 'timeout_decorator', 'typing_extensions'
+ 'portpicker', 'pyserial', 'pyyaml', 'timeout_decorator', 'typing_extensions>=4.1.1'
]
if platform.system() == 'Windows':
@@ -44,13 +44,13 @@
def main():
setuptools.setup(
name='mobly',
- version='1.11',
+ version='1.11.1',
maintainer='Ang Li',
maintainer_email='[email protected]',
description='Automation framework for special end-to-end test cases',
license='Apache2.0',
url='https://github.com/google/mobly',
- download_url='https://github.com/google/mobly/tarball/1.11',
+ download_url='https://github.com/google/mobly/tarball/1.11.1',
packages=setuptools.find_packages(exclude=['tests']),
include_package_data=False,
scripts=['tools/sl4a_shell.py', 'tools/snippet_shell.py'],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,7 +18,7 @@\n import sys\n \n install_requires = [\n- 'portpicker', 'pyserial', 'pyyaml', 'timeout_decorator', 'typing_extensions'\n+ 'portpicker', 'pyserial', 'pyyaml', 'timeout_decorator', 'typing_extensions>=4.1.1'\n ]\n \n if platform.system() == 'Windows':\n@@ -44,13 +44,13 @@\n def main():\n setuptools.setup(\n name='mobly',\n- version='1.11',\n+ version='1.11.1',\n maintainer='Ang Li',\n maintainer_email='[email protected]',\n description='Automation framework for special end-to-end test cases',\n license='Apache2.0',\n url='https://github.com/google/mobly',\n- download_url='https://github.com/google/mobly/tarball/1.11',\n+ download_url='https://github.com/google/mobly/tarball/1.11.1',\n packages=setuptools.find_packages(exclude=['tests']),\n include_package_data=False,\n scripts=['tools/sl4a_shell.py', 'tools/snippet_shell.py'],\n", "issue": "Mobly Release 1.11.1\n\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport platform\nimport setuptools\nfrom setuptools.command import test\nimport sys\n\ninstall_requires = [\n 'portpicker', 'pyserial', 'pyyaml', 'timeout_decorator', 'typing_extensions'\n]\n\nif platform.system() == 'Windows':\n install_requires.append('pywin32')\n\n\nclass PyTest(test.test):\n \"\"\"Class used to execute unit tests using PyTest. This allows us to execute\n unit tests without having to install the package.\n \"\"\"\n\n def finalize_options(self):\n test.test.finalize_options(self)\n self.test_args = ['-x', \"tests/mobly\"]\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\n\ndef main():\n setuptools.setup(\n name='mobly',\n version='1.11',\n maintainer='Ang Li',\n maintainer_email='[email protected]',\n description='Automation framework for special end-to-end test cases',\n license='Apache2.0',\n url='https://github.com/google/mobly',\n download_url='https://github.com/google/mobly/tarball/1.11',\n packages=setuptools.find_packages(exclude=['tests']),\n include_package_data=False,\n scripts=['tools/sl4a_shell.py', 'tools/snippet_shell.py'],\n tests_require=[\n 'mock',\n 'pytest',\n 'pytz',\n ],\n install_requires=install_requires,\n cmdclass={'test': PyTest},\n )\n\n\nif __name__ == '__main__':\n main()\n", "path": "setup.py"}]} | 1,143 | 281 |
gh_patches_debug_22271 | rasdani/github-patches | git_diff | pydantic__pydantic-299 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Creating a child model causes a RecursionError exception
<!-- Questions, Feature Requests, and Bug Reports are all welcome -->
<!-- delete as applicable: -->
# Bug
* OS: **Ubuntu 14.04**
* Python version `import sys; print(sys.version)`: **3.6.7**
* Pydantic version `import pydantic; print(pydantic.VERSION)`: **0.14**
I'm expecting that I can use classic inheritance for dataclass models:
```py
import pydantic.dataclasses
@pydantic.dataclasses.dataclass
class A:
a: str = None
@pydantic.dataclasses.dataclass
class B(A):
b: str = None
B(a='a', b='b')
```
But as a result I'm receiving this:
```
Traceback (most recent call last):
File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2881, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-9-10a7116ca691>", line 12, in <module>
B(a='a', b='b')
File "<string>", line 4, in __init__
File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/dataclasses.py", line 13, in post_init
self.__post_init_original__()
File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/dataclasses.py", line 13, in post_init
self.__post_init_original__()
File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/dataclasses.py", line 13, in post_init
self.__post_init_original__()
[Previous line repeated 952 more times]
File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/dataclasses.py", line 9, in post_init
d = validate_model(self.__pydantic_model__, self.__dict__)
File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/main.py", line 484, in validate_model
v_, errors_ = field.validate(value, values, loc=field.alias, cls=model.__class__)
File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/fields.py", line 303, in validate
v, errors = self._validate_singleton(v, values, loc, cls)
File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/fields.py", line 406, in _validate_singleton
return self._apply_validators(v, values, loc, cls, self.validators)
File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/fields.py", line 412, in _apply_validators
v = validator(v)
File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/validators.py", line 23, in str_validator
if isinstance(v, (str, NoneType)):
RecursionError: maximum recursion depth exceeded in __instancecheck__
```
This line below causes this problem:
https://github.com/samuelcolvin/pydantic/blob/master/pydantic/dataclasses.py#L13
UPD: defining `__post_init__` in the child model fixes the problem, but this is a workaround.
```py
import pydantic.dataclasses
@pydantic.dataclasses.dataclass
class A:
a: str = None
@pydantic.dataclasses.dataclass
class B(A):
b: str = None
    def __post_init__(self):
pass
B(a='a', b='b')
```
</issue>
<code>
[start of pydantic/dataclasses.py]
1 import dataclasses
2
3 from pydantic import ValidationError
4
5 from .main import create_model, validate_model
6
7
8 def post_init(self):
9 d = validate_model(self.__pydantic_model__, self.__dict__)
10 object.__setattr__(self, '__dict__', d)
11 object.__setattr__(self, '__initialised__', True)
12 if self.__post_init_original__:
13 self.__post_init_original__()
14
15
16 def setattr_validate_assignment(self, name, value):
17 if self.__initialised__:
18 d = dict(self.__dict__)
19 d.pop(name)
20 value, error_ = self.__pydantic_model__.__fields__[name].validate(value, d, loc=name)
21 if error_:
22 raise ValidationError([error_])
23
24 object.__setattr__(self, name, value)
25
26
27 def _process_class(_cls, init, repr, eq, order, unsafe_hash, frozen, validate_assignment):
28 post_init_original = getattr(_cls, '__post_init__', None)
29 _cls.__post_init__ = post_init
30 cls = dataclasses._process_class(_cls, init, repr, eq, order, unsafe_hash, frozen)
31
32 fields = {name: (field.type, field.default) for name, field in cls.__dataclass_fields__.items()}
33 cls.__post_init_original__ = post_init_original
34 cls.__pydantic_model__ = create_model(cls.__name__, **fields)
35 cls.__initialised__ = False
36
37 if validate_assignment and not frozen:
38 cls.__setattr__ = setattr_validate_assignment
39 return cls
40
41
42 def dataclass(
43 _cls=None, *, init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False, validate_assignment=False
44 ):
45 """
46 Like the python standard lib dataclasses but with type validation.
47
48 Arguments are the same as for standard dataclasses, except for validate_assignment which has the same meaning
49 as Config.validate_assignment.
50 """
51
52 def wrap(cls):
53 return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, validate_assignment)
54
55 if _cls is None:
56 return wrap
57
58 return wrap(_cls)
59
[end of pydantic/dataclasses.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pydantic/dataclasses.py b/pydantic/dataclasses.py
--- a/pydantic/dataclasses.py
+++ b/pydantic/dataclasses.py
@@ -5,7 +5,7 @@
from .main import create_model, validate_model
-def post_init(self):
+def _pydantic_post_init(self):
d = validate_model(self.__pydantic_model__, self.__dict__)
object.__setattr__(self, '__dict__', d)
object.__setattr__(self, '__initialised__', True)
@@ -26,7 +26,9 @@
def _process_class(_cls, init, repr, eq, order, unsafe_hash, frozen, validate_assignment):
post_init_original = getattr(_cls, '__post_init__', None)
- _cls.__post_init__ = post_init
+ if post_init_original and post_init_original.__name__ == '_pydantic_post_init':
+ post_init_original = None
+ _cls.__post_init__ = _pydantic_post_init
cls = dataclasses._process_class(_cls, init, repr, eq, order, unsafe_hash, frozen)
fields = {name: (field.type, field.default) for name, field in cls.__dataclass_fields__.items()}
| {"golden_diff": "diff --git a/pydantic/dataclasses.py b/pydantic/dataclasses.py\n--- a/pydantic/dataclasses.py\n+++ b/pydantic/dataclasses.py\n@@ -5,7 +5,7 @@\n from .main import create_model, validate_model\n \n \n-def post_init(self):\n+def _pydantic_post_init(self):\n d = validate_model(self.__pydantic_model__, self.__dict__)\n object.__setattr__(self, '__dict__', d)\n object.__setattr__(self, '__initialised__', True)\n@@ -26,7 +26,9 @@\n \n def _process_class(_cls, init, repr, eq, order, unsafe_hash, frozen, validate_assignment):\n post_init_original = getattr(_cls, '__post_init__', None)\n- _cls.__post_init__ = post_init\n+ if post_init_original and post_init_original.__name__ == '_pydantic_post_init':\n+ post_init_original = None\n+ _cls.__post_init__ = _pydantic_post_init\n cls = dataclasses._process_class(_cls, init, repr, eq, order, unsafe_hash, frozen)\n \n fields = {name: (field.type, field.default) for name, field in cls.__dataclass_fields__.items()}\n", "issue": "\u0421reating a child model causes a RecursionError exception\n<!-- Questions, Feature Requests, and Bug Reports are all welcome -->\r\n<!-- delete as applicable: -->\r\n# Bug\r\n\r\n* OS: **Ubuntu 14.04**\r\n* Python version `import sys; print(sys.version)`: **3.6.7**\r\n* Pydantic version `import pydantic; print(pydantic.VERSION)`: **0.14**\r\n\r\nI'm expecting, that I can use a classic inheritance for dataclass models:\r\n```py\r\nimport pydantic.dataclasses\r\n\r\[email protected]\r\nclass A:\r\n a: str = None\r\n\r\[email protected]\r\nclass B(A):\r\n b: str = None\r\n\r\nB(a='a', b='b')\r\n```\r\n\r\nBut as a result I'm receiving this:\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2881, in run_code\r\n exec(code_obj, self.user_global_ns, self.user_ns)\r\n File \"<ipython-input-9-10a7116ca691>\", line 12, in <module>\r\n B(a='a', b='b')\r\n File \"<string>\", line 4, in __init__\r\n File \"/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/dataclasses.py\", line 13, in post_init\r\n self.__post_init_original__()\r\n File \"/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/dataclasses.py\", line 13, in post_init\r\n self.__post_init_original__()\r\n File \"/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/dataclasses.py\", line 13, in post_init\r\n self.__post_init_original__()\r\n [Previous line repeated 952 more times]\r\n File \"/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/dataclasses.py\", line 9, in post_init\r\n d = validate_model(self.__pydantic_model__, self.__dict__)\r\n File \"/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/main.py\", line 484, in validate_model\r\n v_, errors_ = field.validate(value, values, loc=field.alias, cls=model.__class__)\r\n File \"/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/fields.py\", line 303, in validate\r\n v, errors = self._validate_singleton(v, values, loc, cls)\r\n File \"/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/fields.py\", line 406, in _validate_singleton\r\n return self._apply_validators(v, values, loc, cls, self.validators)\r\n File \"/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/fields.py\", line 412, in _apply_validators\r\n v = validator(v)\r\n File \"/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/validators.py\", line 23, in str_validator\r\n if 
isinstance(v, (str, NoneType)):\r\nRecursionError: maximum recursion depth exceeded in __instancecheck__\r\n```\r\nThis line below causes this problem:\r\nhttps://github.com/samuelcolvin/pydantic/blob/master/pydantic/dataclasses.py#L13\r\n\r\nUPD: defining `__post_init__` in child model fixes the problem, but this is workaround.\r\n```py\r\nimport pydantic.dataclasses\r\n\r\[email protected]\r\nclass A:\r\n a: str = None\r\n\r\[email protected]\r\nclass B(A):\r\n b: str = None\r\n\r\n def __post_init__():\r\n pass\r\n\r\nB(a='a', b='b')\r\n```\n", "before_files": [{"content": "import dataclasses\n\nfrom pydantic import ValidationError\n\nfrom .main import create_model, validate_model\n\n\ndef post_init(self):\n d = validate_model(self.__pydantic_model__, self.__dict__)\n object.__setattr__(self, '__dict__', d)\n object.__setattr__(self, '__initialised__', True)\n if self.__post_init_original__:\n self.__post_init_original__()\n\n\ndef setattr_validate_assignment(self, name, value):\n if self.__initialised__:\n d = dict(self.__dict__)\n d.pop(name)\n value, error_ = self.__pydantic_model__.__fields__[name].validate(value, d, loc=name)\n if error_:\n raise ValidationError([error_])\n\n object.__setattr__(self, name, value)\n\n\ndef _process_class(_cls, init, repr, eq, order, unsafe_hash, frozen, validate_assignment):\n post_init_original = getattr(_cls, '__post_init__', None)\n _cls.__post_init__ = post_init\n cls = dataclasses._process_class(_cls, init, repr, eq, order, unsafe_hash, frozen)\n\n fields = {name: (field.type, field.default) for name, field in cls.__dataclass_fields__.items()}\n cls.__post_init_original__ = post_init_original\n cls.__pydantic_model__ = create_model(cls.__name__, **fields)\n cls.__initialised__ = False\n\n if validate_assignment and not frozen:\n cls.__setattr__ = setattr_validate_assignment\n return cls\n\n\ndef dataclass(\n _cls=None, *, init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False, validate_assignment=False\n):\n \"\"\"\n Like the python standard lib dataclasses but with type validation.\n\n Arguments are the same as for standard dataclasses, except for validate_assignment which has the same meaning\n as Config.validate_assignment.\n \"\"\"\n\n def wrap(cls):\n return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, validate_assignment)\n\n if _cls is None:\n return wrap\n\n return wrap(_cls)\n", "path": "pydantic/dataclasses.py"}]} | 2,016 | 272 |
gh_patches_debug_18778 | rasdani/github-patches | git_diff | vacanza__python-holidays-1782 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The calculation of the lunar start date throws a TypeError exception for Thailand and Cambodia
version: 0.47
stack trace:
"python39\lib\site-packages\holidays\calendars\thai.py", line 233, in _get_start_date
return _ThaiLunisolar.START_DATE + td(days=delta_days)
TypeError: unsupported type for timedelta days component: numpy.int32
</issue>
<code>
[start of holidays/helpers.py]
1 # holidays
2 # --------
3 # A fast, efficient Python library for generating country, province and state
4 # specific sets of holidays on the fly. It aims to make determining whether a
5 # specific date is a holiday as fast and flexible as possible.
6 #
7 # Authors: Vacanza Team and individual contributors (see AUTHORS file)
8 # dr-prodigy <[email protected]> (c) 2017-2023
9 # ryanss <[email protected]> (c) 2014-2017
10 # Website: https://github.com/vacanza/python-holidays
11 # License: MIT (see LICENSE file)
12
13
14 def _normalize_arguments(cls, value):
15 """Normalize arguments.
16
17 :param cls:
18 A type of arguments to normalize.
19
20 :param value:
21 Either a single item or an iterable of `cls` type.
22
23 :return:
24 A set created from `value` argument.
25
26 """
27 if isinstance(value, cls):
28 return {value}
29
30 return set(value) if value is not None else set()
31
32
33 def _normalize_tuple(data):
34 """Normalize tuple.
35
36 :param data:
37 Either a tuple or a tuple of tuples.
38
39 :return:
40 An unchanged object for tuple of tuples, e.g., ((JAN, 10), (DEC, 31)).
41 An object put into a tuple otherwise, e.g., ((JAN, 10),).
42 """
43 return data if not data or isinstance(data[0], tuple) else (data,)
44
[end of holidays/helpers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/holidays/helpers.py b/holidays/helpers.py
--- a/holidays/helpers.py
+++ b/holidays/helpers.py
@@ -24,13 +24,19 @@
A set created from `value` argument.
"""
+ if value is None:
+ return set()
+
if isinstance(value, cls):
return {value}
- return set(value) if value is not None else set()
+ try:
+ return {v if isinstance(v, cls) else cls(v) for v in value}
+ except TypeError: # non-iterable
+ return {value if isinstance(value, cls) else cls(value)}
-def _normalize_tuple(data):
+def _normalize_tuple(value):
"""Normalize tuple.
:param data:
@@ -40,4 +46,4 @@
An unchanged object for tuple of tuples, e.g., ((JAN, 10), (DEC, 31)).
An object put into a tuple otherwise, e.g., ((JAN, 10),).
"""
- return data if not data or isinstance(data[0], tuple) else (data,)
+ return value if not value or isinstance(value[0], tuple) else (value,)
| {"golden_diff": "diff --git a/holidays/helpers.py b/holidays/helpers.py\n--- a/holidays/helpers.py\n+++ b/holidays/helpers.py\n@@ -24,13 +24,19 @@\n A set created from `value` argument.\n \n \"\"\"\n+ if value is None:\n+ return set()\n+\n if isinstance(value, cls):\n return {value}\n \n- return set(value) if value is not None else set()\n+ try:\n+ return {v if isinstance(v, cls) else cls(v) for v in value}\n+ except TypeError: # non-iterable\n+ return {value if isinstance(value, cls) else cls(value)}\n \n \n-def _normalize_tuple(data):\n+def _normalize_tuple(value):\n \"\"\"Normalize tuple.\n \n :param data:\n@@ -40,4 +46,4 @@\n An unchanged object for tuple of tuples, e.g., ((JAN, 10), (DEC, 31)).\n An object put into a tuple otherwise, e.g., ((JAN, 10),).\n \"\"\"\n- return data if not data or isinstance(data[0], tuple) else (data,)\n+ return value if not value or isinstance(value[0], tuple) else (value,)\n", "issue": "The calculation of the lunar start date throws a TypeError exception for Thailand and Cambodia\nversion: 0.47\r\nstack trace:\r\n\"python39\\lib\\site-packages\\holidays\\calendars\\thai.py\", line 233, in _get_start_date\r\n return _ThaiLunisolar.START_DATE + td(days=delta_days)\r\nTypeError: unsupported type for timedelta days component: numpy.int32\n", "before_files": [{"content": "# holidays\n# --------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: Vacanza Team and individual contributors (see AUTHORS file)\n# dr-prodigy <[email protected]> (c) 2017-2023\n# ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/vacanza/python-holidays\n# License: MIT (see LICENSE file)\n\n\ndef _normalize_arguments(cls, value):\n \"\"\"Normalize arguments.\n\n :param cls:\n A type of arguments to normalize.\n\n :param value:\n Either a single item or an iterable of `cls` type.\n\n :return:\n A set created from `value` argument.\n\n \"\"\"\n if isinstance(value, cls):\n return {value}\n\n return set(value) if value is not None else set()\n\n\ndef _normalize_tuple(data):\n \"\"\"Normalize tuple.\n\n :param data:\n Either a tuple or a tuple of tuples.\n\n :return:\n An unchanged object for tuple of tuples, e.g., ((JAN, 10), (DEC, 31)).\n An object put into a tuple otherwise, e.g., ((JAN, 10),).\n \"\"\"\n return data if not data or isinstance(data[0], tuple) else (data,)\n", "path": "holidays/helpers.py"}]} | 1,049 | 271 |
gh_patches_debug_7912 | rasdani/github-patches | git_diff | mars-project__mars-1502 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Cannot pass zero-dimensional tensors as both arguments into `mt.arctan2`
## Describe the bug
When I use the `mt.arctan2`, if i pass zero-dimensional tensors as both arguments, an error will be raised. If any argument is not zero-dimensional, the codes will work.
## To Reproduce
```
vx = mt.dot((1,0,0),(0,1,0))
vy = mt.dot((1,0,0),(0,0,1))
t = mt.arctan2(vx, vy)
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
~/anaconda3/lib/python3.7/site-packages/mars/core.py in __len__(self)
533 try:
--> 534 return self.shape[0]
535 except IndexError:
IndexError: tuple index out of range
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
<ipython-input-23-09c63447ea86> in <module>
----> 1 mt.arctan2(vx, vy)
~/anaconda3/lib/python3.7/site-packages/mars/tensor/utils.py in h(*tensors, **kw)
256 kw['dtype'] = dtype
257
--> 258 ret = func(*tensors, **kw)
259 if ret is NotImplemented:
260 reverse_func = getattr(inspect.getmodule(func), 'r{0}'.format(func.__name__), None) \
~/anaconda3/lib/python3.7/site-packages/mars/tensor/arithmetic/arctan2.py in arctan2(x1, x2, out, where, **kwargs)
125 """
126 op = TensorArctan2(**kwargs)
--> 127 return op(x1, x2, out=out, where=where)
~/anaconda3/lib/python3.7/site-packages/mars/tensor/arithmetic/core.py in __call__(self, x1, x2, out, where)
268
269 def __call__(self, x1, x2, out=None, where=None):
--> 270 return self._call(x1, x2, out=out, where=where)
271
272 def rcall(self, x1, x2, out=None, where=None):
~/anaconda3/lib/python3.7/site-packages/mars/tensor/arithmetic/core.py in _call(self, x1, x2, out, where)
251
252 inputs = filter_inputs([x1, x2, out, where])
--> 253 t = self.new_tensor(inputs, shape, order=order)
254
255 if out is None:
~/anaconda3/lib/python3.7/site-packages/mars/tensor/operands.py in new_tensor(self, inputs, shape, dtype, order, **kw)
77 raise TypeError('cannot new tensor with more than 1 outputs')
78
---> 79 return self.new_tensors(inputs, shape=shape, dtype=dtype, order=order, **kw)[0]
80
81 @classmethod
~/anaconda3/lib/python3.7/site-packages/mars/tensor/operands.py in new_tensors(self, inputs, shape, dtype, order, chunks, nsplits, output_limit, kws, **kw)
71 output_limit=None, kws=None, **kw):
72 return self.new_tileables(inputs, shape=shape, chunks=chunks, nsplits=nsplits,
---> 73 output_limit=output_limit, kws=kws, dtype=dtype, order=order, **kw)
74
75 def new_tensor(self, inputs, shape, dtype=None, order=None, **kw):
~/anaconda3/lib/python3.7/site-packages/mars/operands.py in new_tileables(self, inputs, kws, **kw)
352 """
353
--> 354 tileables = self._new_tileables(inputs, kws=kws, **kw)
355 if is_eager_mode():
356 ExecutableTuple(tileables).execute(fetch=False)
~/anaconda3/lib/python3.7/site-packages/mars/tensor/arithmetic/core.py in _new_tileables(self, inputs, kws, **kw)
70
71 def _new_tileables(self, inputs, kws=None, **kw):
---> 72 self._set_sparse(inputs)
73 return super()._new_tileables(
74 inputs, kws=kws, **kw)
~/anaconda3/lib/python3.7/site-packages/mars/tensor/arithmetic/core.py in _set_sparse(self, inputs)
188 x1 = self._lhs if np.isscalar(self._lhs) else next(inputs_iter)
189 x2 = self._rhs if np.isscalar(self._rhs) else next(inputs_iter)
--> 190 setattr(self, '_sparse', self._is_sparse(x1, x2))
191
192 def _set_inputs(self, inputs):
~/anaconda3/lib/python3.7/site-packages/mars/tensor/arithmetic/arctan2.py in _is_sparse(cls, x1, x2)
33 if hasattr(x1, 'issparse') and x1.issparse() and np.isscalar(x2):
34 return True
---> 35 elif x1 == 0:
36 return True
37 return False
~/anaconda3/lib/python3.7/site-packages/mars/tensor/core.py in __len__(self)
279
280 def __len__(self):
--> 281 return len(self._data)
282
283 @property
~/anaconda3/lib/python3.7/site-packages/mars/core.py in __len__(self)
536 if build_mode().is_build_mode:
537 return 0
--> 538 raise TypeError('len() of unsized object')
539
540 @property
TypeError: len() of unsized object
```
</issue>
<code>
[start of mars/tensor/arithmetic/arctan2.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 # Copyright 1999-2020 Alibaba Group Holding Ltd.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 import numpy as np
18
19 from ... import opcodes as OperandDef
20 from ..utils import infer_dtype
21 from .core import TensorBinOp
22 from .utils import arithmetic_operand
23
24
25 @arithmetic_operand
26 class TensorArctan2(TensorBinOp):
27 _op_type_ = OperandDef.ARCTAN2
28 _func_name = 'arctan2'
29
30 @classmethod
31 def _is_sparse(cls, x1, x2):
32 # x2 is sparse or not does not matter
33 if hasattr(x1, 'issparse') and x1.issparse() and np.isscalar(x2):
34 return True
35 elif x1 == 0:
36 return True
37 return False
38
39
40 @infer_dtype(np.arctan2)
41 def arctan2(x1, x2, out=None, where=None, **kwargs):
42 """
43 Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
44
45 The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
46 the signed angle in radians between the ray ending at the origin and
47 passing through the point (1,0), and the ray ending at the origin and
48 passing through the point (`x2`, `x1`). (Note the role reversal: the
49 "`y`-coordinate" is the first function parameter, the "`x`-coordinate"
50 is the second.) By IEEE convention, this function is defined for
51 `x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
52 Notes for specific values).
53
54 This function is not defined for complex-valued arguments; for the
55 so-called argument of complex values, use `angle`.
56
57 Parameters
58 ----------
59 x1 : array_like, real-valued
60 `y`-coordinates.
61 x2 : array_like, real-valued
62 `x`-coordinates. `x2` must be broadcastable to match the shape of
63 `x1` or vice versa.
64 out : Tensor, None, or tuple of Tensor and None, optional
65 A location into which the result is stored. If provided, it must have
66 a shape that the inputs broadcast to. If not provided or `None`,
67 a freshly-allocated tensor is returned. A tuple (possible only as a
68 keyword argument) must have length equal to the number of outputs.
69 where : array_like, optional
70 Values of True indicate to calculate the ufunc at that position, values
71 of False indicate to leave the value in the output alone.
72 **kwargs
73
74 Returns
75 -------
76 angle : Tensor
77 Array of angles in radians, in the range ``[-pi, pi]``.
78
79 See Also
80 --------
81 arctan, tan, angle
82
83 Notes
84 -----
85 *arctan2* is identical to the `atan2` function of the underlying
86 C library. The following special values are defined in the C
87 standard: [1]_
88
89 ====== ====== ================
90 `x1` `x2` `arctan2(x1,x2)`
91 ====== ====== ================
92 +/- 0 +0 +/- 0
93 +/- 0 -0 +/- pi
94 > 0 +/-inf +0 / +pi
95 < 0 +/-inf -0 / -pi
96 +/-inf +inf +/- (pi/4)
97 +/-inf -inf +/- (3*pi/4)
98 ====== ====== ================
99
100 Note that +0 and -0 are distinct floating point numbers, as are +inf
101 and -inf.
102
103 References
104 ----------
105 .. [1] ISO/IEC standard 9899:1999, "Programming language C."
106
107 Examples
108 --------
109 Consider four points in different quadrants:
110 >>> import mars.tensor as mt
111
112 >>> x = mt.array([-1, +1, +1, -1])
113 >>> y = mt.array([-1, -1, +1, +1])
114 >>> (mt.arctan2(y, x) * 180 / mt.pi).execute()
115 array([-135., -45., 45., 135.])
116
117 Note the order of the parameters. `arctan2` is defined also when `x2` = 0
118 and at several other special points, obtaining values in
119 the range ``[-pi, pi]``:
120
121 >>> mt.arctan2([1., -1.], [0., 0.]).execute()
122 array([ 1.57079633, -1.57079633])
123 >>> mt.arctan2([0., 0., mt.inf], [+0., -0., mt.inf]).execute()
124 array([ 0. , 3.14159265, 0.78539816])
125 """
126 op = TensorArctan2(**kwargs)
127 return op(x1, x2, out=out, where=where)
128
[end of mars/tensor/arithmetic/arctan2.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mars/tensor/arithmetic/arctan2.py b/mars/tensor/arithmetic/arctan2.py
--- a/mars/tensor/arithmetic/arctan2.py
+++ b/mars/tensor/arithmetic/arctan2.py
@@ -29,11 +29,12 @@
@classmethod
def _is_sparse(cls, x1, x2):
- # x2 is sparse or not does not matter
- if hasattr(x1, 'issparse') and x1.issparse() and np.isscalar(x2):
- return True
- elif x1 == 0:
+ if hasattr(x1, 'issparse') and x1.issparse():
+ # if x1 is sparse, will be sparse always
return True
+ elif np.isscalar(x1) and x1 == 0:
+ # x1 == 0, return sparse if x2 is
+ return x2.issparse() if hasattr(x2, 'issparse') else False
return False
| {"golden_diff": "diff --git a/mars/tensor/arithmetic/arctan2.py b/mars/tensor/arithmetic/arctan2.py\n--- a/mars/tensor/arithmetic/arctan2.py\n+++ b/mars/tensor/arithmetic/arctan2.py\n@@ -29,11 +29,12 @@\n \n @classmethod\n def _is_sparse(cls, x1, x2):\n- # x2 is sparse or not does not matter\n- if hasattr(x1, 'issparse') and x1.issparse() and np.isscalar(x2):\n- return True\n- elif x1 == 0:\n+ if hasattr(x1, 'issparse') and x1.issparse():\n+ # if x1 is sparse, will be sparse always\n return True\n+ elif np.isscalar(x1) and x1 == 0:\n+ # x1 == 0, return sparse if x2 is\n+ return x2.issparse() if hasattr(x2, 'issparse') else False\n return False\n", "issue": "[BUG]Cannot pass zero-dimensional tensor as both arguments into `mt.arctan2`\n## Describe the bug\r\nWhen I use the `mt.arctan2`, if i pass zero-dimensional tensors as both arguments, an error will be raised. If any argument is not zero-dimensional, the codes will work.\r\n\r\n## To Reproduce\r\n```\r\nvx = mt.dot((1,0,0),(0,1,0))\r\nvy = mt.dot((1,0,0),(0,0,1))\r\nt = mt.arctan2(vx, vy)\r\n---------------------------------------------------------------------------\r\nIndexError Traceback (most recent call last)\r\n~/anaconda3/lib/python3.7/site-packages/mars/core.py in __len__(self)\r\n 533 try:\r\n--> 534 return self.shape[0]\r\n 535 except IndexError:\r\n\r\nIndexError: tuple index out of range\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTypeError Traceback (most recent call last)\r\n<ipython-input-23-09c63447ea86> in <module>\r\n----> 1 mt.arctan2(vx, vy)\r\n\r\n~/anaconda3/lib/python3.7/site-packages/mars/tensor/utils.py in h(*tensors, **kw)\r\n 256 kw['dtype'] = dtype\r\n 257 \r\n--> 258 ret = func(*tensors, **kw)\r\n 259 if ret is NotImplemented:\r\n 260 reverse_func = getattr(inspect.getmodule(func), 'r{0}'.format(func.__name__), None) \\\r\n\r\n~/anaconda3/lib/python3.7/site-packages/mars/tensor/arithmetic/arctan2.py in arctan2(x1, x2, out, where, **kwargs)\r\n 125 \"\"\"\r\n 126 op = TensorArctan2(**kwargs)\r\n--> 127 return op(x1, x2, out=out, where=where)\r\n\r\n~/anaconda3/lib/python3.7/site-packages/mars/tensor/arithmetic/core.py in __call__(self, x1, x2, out, where)\r\n 268 \r\n 269 def __call__(self, x1, x2, out=None, where=None):\r\n--> 270 return self._call(x1, x2, out=out, where=where)\r\n 271 \r\n 272 def rcall(self, x1, x2, out=None, where=None):\r\n\r\n~/anaconda3/lib/python3.7/site-packages/mars/tensor/arithmetic/core.py in _call(self, x1, x2, out, where)\r\n 251 \r\n 252 inputs = filter_inputs([x1, x2, out, where])\r\n--> 253 t = self.new_tensor(inputs, shape, order=order)\r\n 254 \r\n 255 if out is None:\r\n\r\n~/anaconda3/lib/python3.7/site-packages/mars/tensor/operands.py in new_tensor(self, inputs, shape, dtype, order, **kw)\r\n 77 raise TypeError('cannot new tensor with more than 1 outputs')\r\n 78 \r\n---> 79 return self.new_tensors(inputs, shape=shape, dtype=dtype, order=order, **kw)[0]\r\n 80 \r\n 81 @classmethod\r\n\r\n~/anaconda3/lib/python3.7/site-packages/mars/tensor/operands.py in new_tensors(self, inputs, shape, dtype, order, chunks, nsplits, output_limit, kws, **kw)\r\n 71 output_limit=None, kws=None, **kw):\r\n 72 return self.new_tileables(inputs, shape=shape, chunks=chunks, nsplits=nsplits,\r\n---> 73 output_limit=output_limit, kws=kws, dtype=dtype, order=order, **kw)\r\n 74 \r\n 75 def new_tensor(self, inputs, shape, dtype=None, order=None, **kw):\r\n\r\n~/anaconda3/lib/python3.7/site-packages/mars/operands.py in new_tileables(self, inputs, kws, **kw)\r\n 
352 \"\"\"\r\n 353 \r\n--> 354 tileables = self._new_tileables(inputs, kws=kws, **kw)\r\n 355 if is_eager_mode():\r\n 356 ExecutableTuple(tileables).execute(fetch=False)\r\n\r\n~/anaconda3/lib/python3.7/site-packages/mars/tensor/arithmetic/core.py in _new_tileables(self, inputs, kws, **kw)\r\n 70 \r\n 71 def _new_tileables(self, inputs, kws=None, **kw):\r\n---> 72 self._set_sparse(inputs)\r\n 73 return super()._new_tileables(\r\n 74 inputs, kws=kws, **kw)\r\n\r\n~/anaconda3/lib/python3.7/site-packages/mars/tensor/arithmetic/core.py in _set_sparse(self, inputs)\r\n 188 x1 = self._lhs if np.isscalar(self._lhs) else next(inputs_iter)\r\n 189 x2 = self._rhs if np.isscalar(self._rhs) else next(inputs_iter)\r\n--> 190 setattr(self, '_sparse', self._is_sparse(x1, x2))\r\n 191 \r\n 192 def _set_inputs(self, inputs):\r\n\r\n~/anaconda3/lib/python3.7/site-packages/mars/tensor/arithmetic/arctan2.py in _is_sparse(cls, x1, x2)\r\n 33 if hasattr(x1, 'issparse') and x1.issparse() and np.isscalar(x2):\r\n 34 return True\r\n---> 35 elif x1 == 0:\r\n 36 return True\r\n 37 return False\r\n\r\n~/anaconda3/lib/python3.7/site-packages/mars/tensor/core.py in __len__(self)\r\n 279 \r\n 280 def __len__(self):\r\n--> 281 return len(self._data)\r\n 282 \r\n 283 @property\r\n\r\n~/anaconda3/lib/python3.7/site-packages/mars/core.py in __len__(self)\r\n 536 if build_mode().is_build_mode:\r\n 537 return 0\r\n--> 538 raise TypeError('len() of unsized object')\r\n 539 \r\n 540 @property\r\n\r\nTypeError: len() of unsized object\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom ... import opcodes as OperandDef\nfrom ..utils import infer_dtype\nfrom .core import TensorBinOp\nfrom .utils import arithmetic_operand\n\n\n@arithmetic_operand\nclass TensorArctan2(TensorBinOp):\n _op_type_ = OperandDef.ARCTAN2\n _func_name = 'arctan2'\n\n @classmethod\n def _is_sparse(cls, x1, x2):\n # x2 is sparse or not does not matter\n if hasattr(x1, 'issparse') and x1.issparse() and np.isscalar(x2):\n return True\n elif x1 == 0:\n return True\n return False\n\n\n@infer_dtype(np.arctan2)\ndef arctan2(x1, x2, out=None, where=None, **kwargs):\n \"\"\"\n Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.\n\n The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is\n the signed angle in radians between the ray ending at the origin and\n passing through the point (1,0), and the ray ending at the origin and\n passing through the point (`x2`, `x1`). (Note the role reversal: the\n \"`y`-coordinate\" is the first function parameter, the \"`x`-coordinate\"\n is the second.) 
By IEEE convention, this function is defined for\n `x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see\n Notes for specific values).\n\n This function is not defined for complex-valued arguments; for the\n so-called argument of complex values, use `angle`.\n\n Parameters\n ----------\n x1 : array_like, real-valued\n `y`-coordinates.\n x2 : array_like, real-valued\n `x`-coordinates. `x2` must be broadcastable to match the shape of\n `x1` or vice versa.\n out : Tensor, None, or tuple of Tensor and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated tensor is returned. A tuple (possible only as a\n keyword argument) must have length equal to the number of outputs.\n where : array_like, optional\n Values of True indicate to calculate the ufunc at that position, values\n of False indicate to leave the value in the output alone.\n **kwargs\n\n Returns\n -------\n angle : Tensor\n Array of angles in radians, in the range ``[-pi, pi]``.\n\n See Also\n --------\n arctan, tan, angle\n\n Notes\n -----\n *arctan2* is identical to the `atan2` function of the underlying\n C library. The following special values are defined in the C\n standard: [1]_\n\n ====== ====== ================\n `x1` `x2` `arctan2(x1,x2)`\n ====== ====== ================\n +/- 0 +0 +/- 0\n +/- 0 -0 +/- pi\n > 0 +/-inf +0 / +pi\n < 0 +/-inf -0 / -pi\n +/-inf +inf +/- (pi/4)\n +/-inf -inf +/- (3*pi/4)\n ====== ====== ================\n\n Note that +0 and -0 are distinct floating point numbers, as are +inf\n and -inf.\n\n References\n ----------\n .. [1] ISO/IEC standard 9899:1999, \"Programming language C.\"\n\n Examples\n --------\n Consider four points in different quadrants:\n >>> import mars.tensor as mt\n\n >>> x = mt.array([-1, +1, +1, -1])\n >>> y = mt.array([-1, -1, +1, +1])\n >>> (mt.arctan2(y, x) * 180 / mt.pi).execute()\n array([-135., -45., 45., 135.])\n\n Note the order of the parameters. `arctan2` is defined also when `x2` = 0\n and at several other special points, obtaining values in\n the range ``[-pi, pi]``:\n\n >>> mt.arctan2([1., -1.], [0., 0.]).execute()\n array([ 1.57079633, -1.57079633])\n >>> mt.arctan2([0., 0., mt.inf], [+0., -0., mt.inf]).execute()\n array([ 0. , 3.14159265, 0.78539816])\n \"\"\"\n op = TensorArctan2(**kwargs)\n return op(x1, x2, out=out, where=where)\n", "path": "mars/tensor/arithmetic/arctan2.py"}]} | 3,571 | 235 |
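The corrected `_is_sparse` above only takes the `x1 == 0` shortcut for genuine scalars, which is what avoids truth-testing an unsized zero-dimensional tensor. A small sketch of that scalar guard using plain NumPy objects as stand-ins for Mars tensors (so `issparse` is simply absent here):

```python
import numpy as np

def is_sparse(x1, x2):
    if hasattr(x1, 'issparse') and x1.issparse():
        return True                      # a sparse x1 keeps the result sparse
    if np.isscalar(x1) and x1 == 0:      # only compare with 0 when x1 is a real scalar
        return x2.issparse() if hasattr(x2, 'issparse') else False
    return False

vx = np.dot([1, 0, 0], [0, 1, 0])        # zero-dimensional result, like the report's mt.dot
vy = np.dot([1, 0, 0], [0, 0, 1])
print(is_sparse(vx, vy))                 # False: vy has no issparse(), and no unsized-object error
```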
gh_patches_debug_19646 | rasdani/github-patches | git_diff | netbox-community__netbox-16351 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Staged Changes: InventoryItem Fails Sync
### NetBox version
v3.5.1
### Python version
3.10
### Steps to Reproduce
Currently, when working with staged changes there is an edge case where the staged changes fail on merge.
I believe this is because of the serialiser removing key parts of the data required to recreate the device when `merge()` is called.
https://github.com/netbox-community/netbox/blob/f5a1f83f9fa9d98c945d21eb0f7ccb8cd37fbf59/netbox/netbox/staging.py#L119
And when `serialize_object` is called it removes all MPTT model fields: https://github.com/netbox-community/netbox/blob/f5a1f83f9fa9d98c945d21eb0f7ccb8cd37fbf59/netbox/utilities/utils.py#L157
This causes the merge to fail because the MPTT fields are now null in the staged changes table.
Error message:
```
>>> Branch.objects.all()[0].merge()
TEST
<RestrictedQuerySet [<StagedChange: Create dcim.manufacturer (182)>, <StagedChange: Create dcim.devicetype (177)>, <StagedChange: Create dcim.devicerole (120)>, <StagedChange: Update dcim.site (124)>, <StagedChange: Create dcim.device (9608)>, <StagedChange: Create dcim.device (9609)>, <StagedChange: Create dcim.inventoryitem (848)>, <StagedChange: Create dcim.inventoryitem (849)>]>
Create dcim.manufacturer (182)
Create dcim.devicetype (177)
Create dcim.devicerole (120)
Update dcim.site (124)
Create dcim.device (9608)
Create dcim.device (9609)
Create dcim.inventoryitem (848)
Traceback (most recent call last):
File "/opt/netbox/venv/lib/python3.10/site-packages/django/db/backends/utils.py", line 89, in _execute
return self.cursor.execute(sql, params)
psycopg2.errors.NotNullViolation: null value in column "lft" of relation "dcim_inventoryitem" violates not-null constraint
DETAIL: Failing row contains (2023-08-09 12:54:07.062+00, 2023-08-09 12:54:07.062+00, {}, 848, Inventory Item 1, , , , , , null, f, null, null, null, null, 9608, null, null, null, null, null).
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/opt/netbox/netbox/extras/models/staging.py", line 52, in merge
change.apply()
File "/opt/netbox/netbox/extras/models/staging.py", line 107, in apply
instance.save()
File "/opt/netbox/venv/lib/python3.10/site-packages/django/core/serializers/base.py", line 288, in save
models.Model.save_base(self.object, using=using, raw=True, **kwargs)
File "/opt/netbox/venv/lib/python3.10/site-packages/django/db/models/base.py", line 863, in save_base
updated = self._save_table(
File "/opt/netbox/venv/lib/python3.10/site-packages/django/db/models/base.py", line 1006, in _save_table
results = self._do_insert(
File "/opt/netbox/venv/lib/python3.10/site-packages/django/db/models/base.py", line 1047, in _do_insert
return manager._insert(
File "/opt/netbox/venv/lib/python3.10/site-packages/django/db/models/manager.py", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "/opt/netbox/venv/lib/python3.10/site-packages/django/db/models/query.py", line 1791, in _insert
return query.get_compiler(using=using).execute_sql(returning_fields)
File "/opt/netbox/venv/lib/python3.10/site-packages/django/db/models/sql/compiler.py", line 1660, in execute_sql
cursor.execute(sql, params)
File "/opt/netbox/venv/lib/python3.10/site-packages/django/db/backends/utils.py", line 102, in execute
return super().execute(sql, params)
File "/opt/netbox/venv/lib/python3.10/site-packages/django/db/backends/utils.py", line 67, in execute
return self._execute_with_wrappers(
File "/opt/netbox/venv/lib/python3.10/site-packages/django/db/backends/utils.py", line 80, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/opt/netbox/venv/lib/python3.10/site-packages/django/db/backends/utils.py", line 84, in _execute
with self.db.wrap_database_errors:
File "/opt/netbox/venv/lib/python3.10/site-packages/django/db/utils.py", line 91, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/opt/netbox/venv/lib/python3.10/site-packages/django/db/backends/utils.py", line 89, in _execute
return self.cursor.execute(sql, params)
django.db.utils.IntegrityError: null value in column "lft" of relation "dcim_inventoryitem" violates not-null constraint
DETAIL: Failing row contains (2023-08-09 12:54:07.062+00, 2023-08-09 12:54:07.062+00, {}, 848, Inventory Item 1, , , , , , null, f, null, null, null, null, 9608, null, null, null, null, null).
```
Script to recreate:
```python
from netbox.staging import checkout
from extras.models import Branch
from dcim.models import Device, VirtualChassis, DeviceType, Manufacturer, Site, DeviceRole, InventoryItem
Branch.objects.all().delete()
branch = Branch.objects.create(name='Branch 1')
Device.objects.filter(name="test device").delete()
Device.objects.filter(name="test device1").delete()
VirtualChassis.objects.filter(name='VC1').delete()
DeviceType.objects.filter(slug='device-type-1').delete()
Manufacturer.objects.filter(slug='manufacturer-1').delete()
DeviceRole.objects.filter(slug='device-role-1').delete()
with checkout(branch):
manufacturer = Manufacturer.objects.create(
name='Manufacturer', slug='manufacturer-1')
device_type = DeviceType.objects.create(
manufacturer=manufacturer, model='Device Type 1', slug='device-type-1'
)
device_role = DeviceRole.objects.create(
name='Device Role', slug='device-role-1'
)
site, _ = Site.objects.update_or_create(
defaults={'name': "test"}, slug="test")
device, _ = Device.objects.update_or_create(defaults={
"name": "test device", "device_role": device_role, "device_type": device_type, "site": site}, serial="123")
device1, _ = Device.objects.update_or_create(defaults={
"name": "test device1", "device_role": device_role, "device_type": device_type, "site": site}, serial="543")
inv1 = InventoryItem.objects.create(device=device, name='Inventory Item 1'),
inv2 = InventoryItem.objects.create(device=device1, name='Inventory Item 2'),
branch.merge()
print("DONE")
```
### Expected Behavior
There should be a serializer, or an argument to pass to the serializer, that does not remove the key fields needed to recreate objects from the staged changes table.
### Observed Behavior
The serialized object has key fields removed which are required to save the object.
</issue>
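For context on the failing column: django-mptt manages the `lft`, `rght`, `tree_id` and `level` bookkeeping fields itself, and those are exactly the values arriving as NULL in the traceback above. The snippet below is only an illustrative check, assuming a NetBox (Django) shell with django-mptt installed as in the report; it lists the tree columns MPTT expects to fill on `InventoryItem`:
```python
# Illustrative only: run in a NetBox/Django shell; assumes django-mptt,
# which backs InventoryItem's tree fields.
from dcim.models import InventoryItem

mptt_opts = InventoryItem._mptt_meta
print(mptt_opts.left_attr, mptt_opts.right_attr,
      mptt_opts.tree_id_attr, mptt_opts.level_attr)
# Expected output: lft rght tree_id level  (any create path must fill these)
```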
<code>
[start of netbox/extras/models/staging.py]
1 import logging
2
3 from django.contrib.auth import get_user_model
4 from django.contrib.contenttypes.fields import GenericForeignKey
5 from django.db import models, transaction
6 from django.utils.translation import gettext_lazy as _
7
8 from extras.choices import ChangeActionChoices
9 from netbox.models import ChangeLoggedModel
10 from netbox.models.features import *
11 from utilities.serialization import deserialize_object
12
13 __all__ = (
14 'Branch',
15 'StagedChange',
16 )
17
18 logger = logging.getLogger('netbox.staging')
19
20
21 class Branch(ChangeLoggedModel):
22 """
23 A collection of related StagedChanges.
24 """
25 name = models.CharField(
26 verbose_name=_('name'),
27 max_length=100,
28 unique=True
29 )
30 description = models.CharField(
31 verbose_name=_('description'),
32 max_length=200,
33 blank=True
34 )
35 user = models.ForeignKey(
36 to=get_user_model(),
37 on_delete=models.SET_NULL,
38 blank=True,
39 null=True
40 )
41
42 class Meta:
43 ordering = ('name',)
44 verbose_name = _('branch')
45 verbose_name_plural = _('branches')
46
47 def __str__(self):
48 return f'{self.name} ({self.pk})'
49
50 def merge(self):
51 logger.info(f'Merging changes in branch {self}')
52 with transaction.atomic():
53 for change in self.staged_changes.all():
54 change.apply()
55 self.staged_changes.all().delete()
56
57
58 class StagedChange(CustomValidationMixin, EventRulesMixin, models.Model):
59 """
60 The prepared creation, modification, or deletion of an object to be applied to the active database at a
61 future point.
62 """
63 branch = models.ForeignKey(
64 to=Branch,
65 on_delete=models.CASCADE,
66 related_name='staged_changes'
67 )
68 action = models.CharField(
69 verbose_name=_('action'),
70 max_length=20,
71 choices=ChangeActionChoices
72 )
73 object_type = models.ForeignKey(
74 to='contenttypes.ContentType',
75 on_delete=models.CASCADE,
76 related_name='+'
77 )
78 object_id = models.PositiveBigIntegerField(
79 blank=True,
80 null=True
81 )
82 object = GenericForeignKey(
83 ct_field='object_type',
84 fk_field='object_id'
85 )
86 data = models.JSONField(
87 verbose_name=_('data'),
88 blank=True,
89 null=True
90 )
91
92 class Meta:
93 ordering = ('pk',)
94 indexes = (
95 models.Index(fields=('object_type', 'object_id')),
96 )
97 verbose_name = _('staged change')
98 verbose_name_plural = _('staged changes')
99
100 def __str__(self):
101 action = self.get_action_display()
102 app_label, model_name = self.object_type.natural_key()
103 return f"{action} {app_label}.{model_name} ({self.object_id})"
104
105 @property
106 def model(self):
107 return self.object_type.model_class()
108
109 def apply(self):
110 """
111 Apply the staged create/update/delete action to the database.
112 """
113 if self.action == ChangeActionChoices.ACTION_CREATE:
114 instance = deserialize_object(self.model, self.data, pk=self.object_id)
115 logger.info(f'Creating {self.model._meta.verbose_name} {instance}')
116 instance.save()
117
118 if self.action == ChangeActionChoices.ACTION_UPDATE:
119 instance = deserialize_object(self.model, self.data, pk=self.object_id)
120 logger.info(f'Updating {self.model._meta.verbose_name} {instance}')
121 instance.save()
122
123 if self.action == ChangeActionChoices.ACTION_DELETE:
124 instance = self.model.objects.get(pk=self.object_id)
125 logger.info(f'Deleting {self.model._meta.verbose_name} {instance}')
126 instance.delete()
127 apply.alters_data = True
128
129 def get_action_color(self):
130 return ChangeActionChoices.colors.get(self.action)
131
[end of netbox/extras/models/staging.py]
</code>
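As a sketch of one possible mitigation (it mirrors the reference patch shown further down and assumes django-mptt is importable): after replaying staged changes for a tree-backed model, recompute the MPTT columns rather than trusting the raw rows.
```python
# Sketch only, not the authoritative fix for this issue.
from mptt.models import MPTTModel

def rebuild_tree_if_needed(model):
    """Call after applying staged changes for `model` (e.g. from merge())."""
    if issubclass(model, MPTTModel):
        # TreeManager.rebuild() re-derives lft/rght/tree_id/level in bulk.
        model.objects.rebuild()
```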
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/netbox/extras/models/staging.py b/netbox/extras/models/staging.py
--- a/netbox/extras/models/staging.py
+++ b/netbox/extras/models/staging.py
@@ -4,6 +4,7 @@
from django.contrib.contenttypes.fields import GenericForeignKey
from django.db import models, transaction
from django.utils.translation import gettext_lazy as _
+from mptt.models import MPTTModel
from extras.choices import ChangeActionChoices
from netbox.models import ChangeLoggedModel
@@ -124,6 +125,11 @@
instance = self.model.objects.get(pk=self.object_id)
logger.info(f'Deleting {self.model._meta.verbose_name} {instance}')
instance.delete()
+
+ # Rebuild the MPTT tree where applicable
+ if issubclass(self.model, MPTTModel):
+ self.model.objects.rebuild()
+
apply.alters_data = True
def get_action_color(self):
| {"golden_diff": "diff --git a/netbox/extras/models/staging.py b/netbox/extras/models/staging.py\n--- a/netbox/extras/models/staging.py\n+++ b/netbox/extras/models/staging.py\n@@ -4,6 +4,7 @@\n from django.contrib.contenttypes.fields import GenericForeignKey\n from django.db import models, transaction\n from django.utils.translation import gettext_lazy as _\n+from mptt.models import MPTTModel\n \n from extras.choices import ChangeActionChoices\n from netbox.models import ChangeLoggedModel\n@@ -124,6 +125,11 @@\n instance = self.model.objects.get(pk=self.object_id)\n logger.info(f'Deleting {self.model._meta.verbose_name} {instance}')\n instance.delete()\n+\n+ # Rebuild the MPTT tree where applicable\n+ if issubclass(self.model, MPTTModel):\n+ self.model.objects.rebuild()\n+\n apply.alters_data = True\n \n def get_action_color(self):\n", "issue": "Staged Changes: InventoryItem Fails Sync\n### NetBox version\n\nv3.5.1\n\n### Python version\n\n3.10\n\n### Steps to Reproduce\n\nCurrently when working with staged changes there is a edge-case where the staged changes failed on the merge. \r\n\r\nI believe this is because of the serialiser removing key parts of the data required to recreate the device when `merge()` is called. \r\n\r\nhttps://github.com/netbox-community/netbox/blob/f5a1f83f9fa9d98c945d21eb0f7ccb8cd37fbf59/netbox/netbox/staging.py#L119\r\n\r\nAnd when `serialize_object` is called it removes all MPTT model fields: https://github.com/netbox-community/netbox/blob/f5a1f83f9fa9d98c945d21eb0f7ccb8cd37fbf59/netbox/utilities/utils.py#L157\r\nThis causes the merge to fail because the MPTT fields are now null. In the staged changes table. \r\n\r\nError message:\r\n\r\n```\r\n>>> Branch.objects.all()[0].merge()\r\nTEST\r\n<RestrictedQuerySet [<StagedChange: Create dcim.manufacturer (182)>, <StagedChange: Create dcim.devicetype (177)>, <StagedChange: Create dcim.devicerole (120)>, <StagedChange: Update dcim.site (124)>, <StagedChange: Create dcim.device (9608)>, <StagedChange: Create dcim.device (9609)>, <StagedChange: Create dcim.inventoryitem (848)>, <StagedChange: Create dcim.inventoryitem (849)>]>\r\nCreate dcim.manufacturer (182)\r\nCreate dcim.devicetype (177)\r\nCreate dcim.devicerole (120)\r\nUpdate dcim.site (124)\r\nCreate dcim.device (9608)\r\nCreate dcim.device (9609)\r\nCreate dcim.inventoryitem (848)\r\nTraceback (most recent call last):\r\n File \"/opt/netbox/venv/lib/python3.10/site-packages/django/db/backends/utils.py\", line 89, in _execute\r\n return self.cursor.execute(sql, params)\r\npsycopg2.errors.NotNullViolation: null value in column \"lft\" of relation \"dcim_inventoryitem\" violates not-null constraint\r\nDETAIL: Failing row contains (2023-08-09 12:54:07.062+00, 2023-08-09 12:54:07.062+00, {}, 848, Inventory Item 1, , , , , , null, f, null, null, null, null, 9608, null, null, null, null, null).\r\n\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"<console>\", line 1, in <module>\r\n File \"/opt/netbox/netbox/extras/models/staging.py\", line 52, in merge\r\n change.apply()\r\n File \"/opt/netbox/netbox/extras/models/staging.py\", line 107, in apply\r\n instance.save()\r\n File \"/opt/netbox/venv/lib/python3.10/site-packages/django/core/serializers/base.py\", line 288, in save\r\n models.Model.save_base(self.object, using=using, raw=True, **kwargs)\r\n File \"/opt/netbox/venv/lib/python3.10/site-packages/django/db/models/base.py\", line 863, in save_base\r\n updated = self._save_table(\r\n 
File \"/opt/netbox/venv/lib/python3.10/site-packages/django/db/models/base.py\", line 1006, in _save_table\r\n results = self._do_insert(\r\n File \"/opt/netbox/venv/lib/python3.10/site-packages/django/db/models/base.py\", line 1047, in _do_insert\r\n return manager._insert(\r\n File \"/opt/netbox/venv/lib/python3.10/site-packages/django/db/models/manager.py\", line 85, in manager_method\r\n return getattr(self.get_queryset(), name)(*args, **kwargs)\r\n File \"/opt/netbox/venv/lib/python3.10/site-packages/django/db/models/query.py\", line 1791, in _insert\r\n return query.get_compiler(using=using).execute_sql(returning_fields)\r\n File \"/opt/netbox/venv/lib/python3.10/site-packages/django/db/models/sql/compiler.py\", line 1660, in execute_sql\r\n cursor.execute(sql, params)\r\n File \"/opt/netbox/venv/lib/python3.10/site-packages/django/db/backends/utils.py\", line 102, in execute\r\n return super().execute(sql, params)\r\n File \"/opt/netbox/venv/lib/python3.10/site-packages/django/db/backends/utils.py\", line 67, in execute\r\n return self._execute_with_wrappers(\r\n File \"/opt/netbox/venv/lib/python3.10/site-packages/django/db/backends/utils.py\", line 80, in _execute_with_wrappers\r\n return executor(sql, params, many, context)\r\n File \"/opt/netbox/venv/lib/python3.10/site-packages/django/db/backends/utils.py\", line 84, in _execute\r\n with self.db.wrap_database_errors:\r\n File \"/opt/netbox/venv/lib/python3.10/site-packages/django/db/utils.py\", line 91, in __exit__\r\n raise dj_exc_value.with_traceback(traceback) from exc_value\r\n File \"/opt/netbox/venv/lib/python3.10/site-packages/django/db/backends/utils.py\", line 89, in _execute\r\n return self.cursor.execute(sql, params)\r\ndjango.db.utils.IntegrityError: null value in column \"lft\" of relation \"dcim_inventoryitem\" violates not-null constraint\r\nDETAIL: Failing row contains (2023-08-09 12:54:07.062+00, 2023-08-09 12:54:07.062+00, {}, 848, Inventory Item 1, , , , , , null, f, null, null, null, null, 9608, null, null, null, null, null).\r\n\r\n```\r\n\r\nScript to recreate:\r\n\r\n```python\r\nfrom netbox.staging import checkout\r\nfrom extras.models import Branch\r\nfrom dcim.models import Device, VirtualChassis, DeviceType, Manufacturer, Site, DeviceRole, InventoryItem\r\n\r\nBranch.objects.all().delete()\r\nbranch = Branch.objects.create(name='Branch 1')\r\n\r\nDevice.objects.filter(name=\"test device\").delete()\r\nDevice.objects.filter(name=\"test device1\").delete()\r\nVirtualChassis.objects.filter(name='VC1').delete()\r\nDeviceType.objects.filter(slug='device-type-1').delete()\r\nManufacturer.objects.filter(slug='manufacturer-1').delete()\r\nDeviceRole.objects.filter(slug='device-role-1').delete()\r\n\r\n\r\nwith checkout(branch):\r\n\r\n manufacturer = Manufacturer.objects.create(\r\n name='Manufacturer', slug='manufacturer-1')\r\n device_type = DeviceType.objects.create(\r\n manufacturer=manufacturer, model='Device Type 1', slug='device-type-1'\r\n )\r\n device_role = DeviceRole.objects.create(\r\n name='Device Role', slug='device-role-1'\r\n )\r\n site, _ = Site.objects.update_or_create(\r\n defaults={'name': \"test\"}, slug=\"test\")\r\n device, _ = Device.objects.update_or_create(defaults={\r\n \"name\": \"test device\", \"device_role\": device_role, \"device_type\": device_type, \"site\": site}, serial=\"123\")\r\n device1, _ = Device.objects.update_or_create(defaults={\r\n \"name\": \"test device1\", \"device_role\": device_role, \"device_type\": device_type, \"site\": site}, serial=\"543\")\r\n inv1 = 
InventoryItem.objects.create(device=device, name='Inventory Item 1'),\r\n inv2 = InventoryItem.objects.create(device=device1, name='Inventory Item 2'),\r\n\r\n\r\n\r\nbranch.merge()\r\n\r\nprint(\"DONE\")\r\n\r\n```\r\n\n\n### Expected Behavior\n\nIf there is a serializer or argument to pass to the serializer that does not remove key fields to recreate objects from the staged changes table. \n\n### Observed Behavior\n\nThe serialized object has key fields removed which are required to save the object. \n", "before_files": [{"content": "import logging\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.db import models, transaction\nfrom django.utils.translation import gettext_lazy as _\n\nfrom extras.choices import ChangeActionChoices\nfrom netbox.models import ChangeLoggedModel\nfrom netbox.models.features import *\nfrom utilities.serialization import deserialize_object\n\n__all__ = (\n 'Branch',\n 'StagedChange',\n)\n\nlogger = logging.getLogger('netbox.staging')\n\n\nclass Branch(ChangeLoggedModel):\n \"\"\"\n A collection of related StagedChanges.\n \"\"\"\n name = models.CharField(\n verbose_name=_('name'),\n max_length=100,\n unique=True\n )\n description = models.CharField(\n verbose_name=_('description'),\n max_length=200,\n blank=True\n )\n user = models.ForeignKey(\n to=get_user_model(),\n on_delete=models.SET_NULL,\n blank=True,\n null=True\n )\n\n class Meta:\n ordering = ('name',)\n verbose_name = _('branch')\n verbose_name_plural = _('branches')\n\n def __str__(self):\n return f'{self.name} ({self.pk})'\n\n def merge(self):\n logger.info(f'Merging changes in branch {self}')\n with transaction.atomic():\n for change in self.staged_changes.all():\n change.apply()\n self.staged_changes.all().delete()\n\n\nclass StagedChange(CustomValidationMixin, EventRulesMixin, models.Model):\n \"\"\"\n The prepared creation, modification, or deletion of an object to be applied to the active database at a\n future point.\n \"\"\"\n branch = models.ForeignKey(\n to=Branch,\n on_delete=models.CASCADE,\n related_name='staged_changes'\n )\n action = models.CharField(\n verbose_name=_('action'),\n max_length=20,\n choices=ChangeActionChoices\n )\n object_type = models.ForeignKey(\n to='contenttypes.ContentType',\n on_delete=models.CASCADE,\n related_name='+'\n )\n object_id = models.PositiveBigIntegerField(\n blank=True,\n null=True\n )\n object = GenericForeignKey(\n ct_field='object_type',\n fk_field='object_id'\n )\n data = models.JSONField(\n verbose_name=_('data'),\n blank=True,\n null=True\n )\n\n class Meta:\n ordering = ('pk',)\n indexes = (\n models.Index(fields=('object_type', 'object_id')),\n )\n verbose_name = _('staged change')\n verbose_name_plural = _('staged changes')\n\n def __str__(self):\n action = self.get_action_display()\n app_label, model_name = self.object_type.natural_key()\n return f\"{action} {app_label}.{model_name} ({self.object_id})\"\n\n @property\n def model(self):\n return self.object_type.model_class()\n\n def apply(self):\n \"\"\"\n Apply the staged create/update/delete action to the database.\n \"\"\"\n if self.action == ChangeActionChoices.ACTION_CREATE:\n instance = deserialize_object(self.model, self.data, pk=self.object_id)\n logger.info(f'Creating {self.model._meta.verbose_name} {instance}')\n instance.save()\n\n if self.action == ChangeActionChoices.ACTION_UPDATE:\n instance = deserialize_object(self.model, self.data, pk=self.object_id)\n logger.info(f'Updating 
{self.model._meta.verbose_name} {instance}')\n instance.save()\n\n if self.action == ChangeActionChoices.ACTION_DELETE:\n instance = self.model.objects.get(pk=self.object_id)\n logger.info(f'Deleting {self.model._meta.verbose_name} {instance}')\n instance.delete()\n apply.alters_data = True\n\n def get_action_color(self):\n return ChangeActionChoices.colors.get(self.action)\n", "path": "netbox/extras/models/staging.py"}]} | 3,508 | 210 |
gh_patches_debug_4285 | rasdani/github-patches | git_diff | conda__conda-build-1397 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
--output reports wrong folder for noarch packages
In linux-64 packages build with the noarch flag end up in the appropriate directory, but the --output function does not return the correct path.
</issue>
<code>
[start of conda_build/render.py]
1 # (c) Continuum Analytics, Inc. / http://continuum.io
2 # All Rights Reserved
3 #
4 # conda is distributed under the terms of the BSD 3-clause license.
5 # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
6
7 from __future__ import absolute_import, division, print_function
8
9 from locale import getpreferredencoding
10 import os
11 from os.path import isdir, isfile, abspath
12 import subprocess
13 import sys
14 import tarfile
15 import tempfile
16
17 import yaml
18
19 from .conda_interface import PY3
20
21 from conda_build import exceptions, utils
22 from conda_build.metadata import MetaData
23 import conda_build.source as source
24 from conda_build.completers import all_versions, conda_version
25 from conda_build.utils import rm_rf
26
27
28 def set_language_env_vars(args, parser, config, execute=None):
29 """Given args passed into conda command, set language env vars"""
30 for lang in all_versions:
31 versions = getattr(args, lang)
32 if not versions:
33 continue
34 if versions == ['all']:
35 if all_versions[lang]:
36 versions = all_versions[lang]
37 else:
38 parser.error("'all' is not supported for --%s" % lang)
39 if len(versions) > 1:
40 for ver in versions[:]:
41 setattr(args, lang, [str(ver)])
42 if execute:
43 execute(args, parser, config)
44 # This is necessary to make all combinations build.
45 setattr(args, lang, versions)
46 return
47 else:
48 version = versions[0]
49 if lang in ('python', 'numpy'):
50 version = int(version.replace('.', ''))
51 setattr(config, conda_version[lang], version)
52 if not len(str(version)) in (2, 3) and lang in ['python', 'numpy']:
53 if all_versions[lang]:
54 raise RuntimeError("%s must be major.minor, like %s, not %s" %
55 (conda_version[lang], all_versions[lang][-1] / 10, version))
56 else:
57 raise RuntimeError("%s must be major.minor, not %s" %
58 (conda_version[lang], version))
59
60 # Using --python, --numpy etc. is equivalent to using CONDA_PY, CONDA_NPY, etc.
61 # Auto-set those env variables
62 for var in conda_version.values():
63 if hasattr(config, var) and getattr(config, var):
64 # Set the env variable.
65 os.environ[var] = str(getattr(config, var))
66
67
68 def bldpkg_path(m, config):
69 '''
70 Returns path to built package's tarball given its ``Metadata``.
71 '''
72 return os.path.join(config.bldpkgs_dir, '%s.tar.bz2' % m.dist())
73
74
75 def parse_or_try_download(metadata, no_download_source, config,
76 force_download=False):
77
78 need_reparse_in_env = False
79 if (force_download or (not no_download_source and (metadata.uses_vcs_in_meta or
80 metadata.uses_setup_py_in_meta))):
81
82 # this try/catch is for when the tool to download source is actually in
83 # meta.yaml, and not previously installed in builder env.
84 try:
85 if not config.dirty:
86 source.provide(metadata.path, metadata.get_section('source'), config=config)
87 need_source_download = False
88 try:
89 metadata.parse_again(config=config, permit_undefined_jinja=False)
90 except (ImportError, exceptions.UnableToParseMissingSetuptoolsDependencies):
91 need_reparse_in_env = True
92 except subprocess.CalledProcessError as error:
93 print("Warning: failed to download source. If building, will try "
94 "again after downloading recipe dependencies.")
95 print("Error was: ")
96 print(error)
97 need_source_download = True
98
99 elif not metadata.get_section('source'):
100 need_source_download = False
101 if not os.path.isdir(config.work_dir):
102 os.makedirs(config.work_dir)
103 else:
104 # we have not downloaded source in the render phase. Download it in
105 # the build phase
106 need_source_download = not no_download_source
107 if not need_reparse_in_env:
108 try:
109 metadata.parse_until_resolved(config=config)
110 except exceptions.UnableToParseMissingSetuptoolsDependencies:
111 need_reparse_in_env = True
112 return metadata, need_source_download, need_reparse_in_env
113
114
115 def reparse(metadata, config):
116 """Some things need to be parsed again after the build environment has been created
117 and activated."""
118 sys.path.insert(0, config.build_prefix)
119 sys.path.insert(0, utils.get_site_packages(config.build_prefix))
120 metadata.parse_again(config=config, permit_undefined_jinja=False)
121
122
123 def render_recipe(recipe_path, config, no_download_source=False):
124 arg = recipe_path
125 # Don't use byte literals for paths in Python 2
126 if not PY3:
127 arg = arg.decode(getpreferredencoding() or 'utf-8')
128 if isfile(arg):
129 if arg.endswith(('.tar', '.tar.gz', '.tgz', '.tar.bz2')):
130 recipe_dir = tempfile.mkdtemp()
131 t = tarfile.open(arg, 'r:*')
132 t.extractall(path=recipe_dir)
133 t.close()
134 need_cleanup = True
135 else:
136 print("Ignoring non-recipe: %s" % arg)
137 return
138 else:
139 recipe_dir = abspath(arg)
140 need_cleanup = False
141
142 if not isdir(recipe_dir):
143 sys.exit("Error: no such directory: %s" % recipe_dir)
144
145 if config.set_build_id:
146 # updates a unique build id if not already computed
147 config.compute_build_id(os.path.basename(recipe_dir))
148 try:
149 m = MetaData(recipe_dir, config=config)
150 except exceptions.YamlParsingError as e:
151 sys.stderr.write(e.error_msg())
152 sys.exit(1)
153
154 config.noarch = m.get_value('build/noarch')
155 m, need_download, need_reparse_in_env = parse_or_try_download(m,
156 no_download_source=no_download_source,
157 config=config)
158
159 if need_cleanup:
160 rm_rf(recipe_dir)
161
162 return m, need_download, need_reparse_in_env
163
164
165 # Next bit of stuff is to support YAML output in the order we expect.
166 # http://stackoverflow.com/a/17310199/1170370
167 class _MetaYaml(dict):
168 fields = ["package", "source", "build", "requirements", "test", "about", "extra"]
169
170 def to_omap(self):
171 return [(field, self[field]) for field in _MetaYaml.fields if field in self]
172
173
174 def _represent_omap(dumper, data):
175 return dumper.represent_mapping(u'tag:yaml.org,2002:map', data.to_omap())
176
177
178 def _unicode_representer(dumper, uni):
179 node = yaml.ScalarNode(tag=u'tag:yaml.org,2002:str', value=uni)
180 return node
181
182
183 class _IndentDumper(yaml.Dumper):
184 def increase_indent(self, flow=False, indentless=False):
185 return super(_IndentDumper, self).increase_indent(flow, False)
186
187 yaml.add_representer(_MetaYaml, _represent_omap)
188 if PY3:
189 yaml.add_representer(str, _unicode_representer)
190 unicode = None # silence pyflakes about unicode not existing in py3
191 else:
192 yaml.add_representer(unicode, _unicode_representer)
193
194
195 def output_yaml(metadata, filename=None):
196 output = yaml.dump(_MetaYaml(metadata.meta), Dumper=_IndentDumper,
197 default_flow_style=False, indent=4)
198 if filename:
199 with open(filename, "w") as f:
200 f.write(output)
201 return "Wrote yaml to %s" % filename
202 else:
203 return output
204
[end of conda_build/render.py]
</code>
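For illustration, a subdir-aware variant of `bldpkg_path()` could consult the package's declared subdir so that noarch builds resolve under `noarch/`. This is a hedged sketch that mirrors the reference diff below; `m.info_index()['subdir']` is the MetaData lookup that diff relies on.
```python
# Sketch of a subdir-aware bldpkg_path(); assumes `m` is a conda_build
# MetaData instance and `config` the Config object used in render.py above.
import os

def bldpkg_path_subdir_aware(m, config):
    output_dir = m.info_index()['subdir']        # 'noarch' or e.g. 'linux-64'
    return os.path.join(os.path.dirname(config.bldpkgs_dir),
                        output_dir, '%s.tar.bz2' % m.dist())
```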
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conda_build/render.py b/conda_build/render.py
--- a/conda_build/render.py
+++ b/conda_build/render.py
@@ -69,7 +69,8 @@
'''
Returns path to built package's tarball given its ``Metadata``.
'''
- return os.path.join(config.bldpkgs_dir, '%s.tar.bz2' % m.dist())
+ output_dir = m.info_index()['subdir']
+ return os.path.join(os.path.dirname(config.bldpkgs_dir), output_dir, '%s.tar.bz2' % m.dist())
def parse_or_try_download(metadata, no_download_source, config,
| {"golden_diff": "diff --git a/conda_build/render.py b/conda_build/render.py\n--- a/conda_build/render.py\n+++ b/conda_build/render.py\n@@ -69,7 +69,8 @@\n '''\n Returns path to built package's tarball given its ``Metadata``.\n '''\n- return os.path.join(config.bldpkgs_dir, '%s.tar.bz2' % m.dist())\n+ output_dir = m.info_index()['subdir']\n+ return os.path.join(os.path.dirname(config.bldpkgs_dir), output_dir, '%s.tar.bz2' % m.dist())\n \n \n def parse_or_try_download(metadata, no_download_source, config,\n", "issue": "--output reports wrong folder for noarch packages\nIn linux-64 packages build with the noarch flag end up in the appropriate directory, but the --output function does not return the correct path. \n\n", "before_files": [{"content": "# (c) Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom locale import getpreferredencoding\nimport os\nfrom os.path import isdir, isfile, abspath\nimport subprocess\nimport sys\nimport tarfile\nimport tempfile\n\nimport yaml\n\nfrom .conda_interface import PY3\n\nfrom conda_build import exceptions, utils\nfrom conda_build.metadata import MetaData\nimport conda_build.source as source\nfrom conda_build.completers import all_versions, conda_version\nfrom conda_build.utils import rm_rf\n\n\ndef set_language_env_vars(args, parser, config, execute=None):\n \"\"\"Given args passed into conda command, set language env vars\"\"\"\n for lang in all_versions:\n versions = getattr(args, lang)\n if not versions:\n continue\n if versions == ['all']:\n if all_versions[lang]:\n versions = all_versions[lang]\n else:\n parser.error(\"'all' is not supported for --%s\" % lang)\n if len(versions) > 1:\n for ver in versions[:]:\n setattr(args, lang, [str(ver)])\n if execute:\n execute(args, parser, config)\n # This is necessary to make all combinations build.\n setattr(args, lang, versions)\n return\n else:\n version = versions[0]\n if lang in ('python', 'numpy'):\n version = int(version.replace('.', ''))\n setattr(config, conda_version[lang], version)\n if not len(str(version)) in (2, 3) and lang in ['python', 'numpy']:\n if all_versions[lang]:\n raise RuntimeError(\"%s must be major.minor, like %s, not %s\" %\n (conda_version[lang], all_versions[lang][-1] / 10, version))\n else:\n raise RuntimeError(\"%s must be major.minor, not %s\" %\n (conda_version[lang], version))\n\n # Using --python, --numpy etc. 
is equivalent to using CONDA_PY, CONDA_NPY, etc.\n # Auto-set those env variables\n for var in conda_version.values():\n if hasattr(config, var) and getattr(config, var):\n # Set the env variable.\n os.environ[var] = str(getattr(config, var))\n\n\ndef bldpkg_path(m, config):\n '''\n Returns path to built package's tarball given its ``Metadata``.\n '''\n return os.path.join(config.bldpkgs_dir, '%s.tar.bz2' % m.dist())\n\n\ndef parse_or_try_download(metadata, no_download_source, config,\n force_download=False):\n\n need_reparse_in_env = False\n if (force_download or (not no_download_source and (metadata.uses_vcs_in_meta or\n metadata.uses_setup_py_in_meta))):\n\n # this try/catch is for when the tool to download source is actually in\n # meta.yaml, and not previously installed in builder env.\n try:\n if not config.dirty:\n source.provide(metadata.path, metadata.get_section('source'), config=config)\n need_source_download = False\n try:\n metadata.parse_again(config=config, permit_undefined_jinja=False)\n except (ImportError, exceptions.UnableToParseMissingSetuptoolsDependencies):\n need_reparse_in_env = True\n except subprocess.CalledProcessError as error:\n print(\"Warning: failed to download source. If building, will try \"\n \"again after downloading recipe dependencies.\")\n print(\"Error was: \")\n print(error)\n need_source_download = True\n\n elif not metadata.get_section('source'):\n need_source_download = False\n if not os.path.isdir(config.work_dir):\n os.makedirs(config.work_dir)\n else:\n # we have not downloaded source in the render phase. Download it in\n # the build phase\n need_source_download = not no_download_source\n if not need_reparse_in_env:\n try:\n metadata.parse_until_resolved(config=config)\n except exceptions.UnableToParseMissingSetuptoolsDependencies:\n need_reparse_in_env = True\n return metadata, need_source_download, need_reparse_in_env\n\n\ndef reparse(metadata, config):\n \"\"\"Some things need to be parsed again after the build environment has been created\n and activated.\"\"\"\n sys.path.insert(0, config.build_prefix)\n sys.path.insert(0, utils.get_site_packages(config.build_prefix))\n metadata.parse_again(config=config, permit_undefined_jinja=False)\n\n\ndef render_recipe(recipe_path, config, no_download_source=False):\n arg = recipe_path\n # Don't use byte literals for paths in Python 2\n if not PY3:\n arg = arg.decode(getpreferredencoding() or 'utf-8')\n if isfile(arg):\n if arg.endswith(('.tar', '.tar.gz', '.tgz', '.tar.bz2')):\n recipe_dir = tempfile.mkdtemp()\n t = tarfile.open(arg, 'r:*')\n t.extractall(path=recipe_dir)\n t.close()\n need_cleanup = True\n else:\n print(\"Ignoring non-recipe: %s\" % arg)\n return\n else:\n recipe_dir = abspath(arg)\n need_cleanup = False\n\n if not isdir(recipe_dir):\n sys.exit(\"Error: no such directory: %s\" % recipe_dir)\n\n if config.set_build_id:\n # updates a unique build id if not already computed\n config.compute_build_id(os.path.basename(recipe_dir))\n try:\n m = MetaData(recipe_dir, config=config)\n except exceptions.YamlParsingError as e:\n sys.stderr.write(e.error_msg())\n sys.exit(1)\n\n config.noarch = m.get_value('build/noarch')\n m, need_download, need_reparse_in_env = parse_or_try_download(m,\n no_download_source=no_download_source,\n config=config)\n\n if need_cleanup:\n rm_rf(recipe_dir)\n\n return m, need_download, need_reparse_in_env\n\n\n# Next bit of stuff is to support YAML output in the order we expect.\n# http://stackoverflow.com/a/17310199/1170370\nclass _MetaYaml(dict):\n fields = 
[\"package\", \"source\", \"build\", \"requirements\", \"test\", \"about\", \"extra\"]\n\n def to_omap(self):\n return [(field, self[field]) for field in _MetaYaml.fields if field in self]\n\n\ndef _represent_omap(dumper, data):\n return dumper.represent_mapping(u'tag:yaml.org,2002:map', data.to_omap())\n\n\ndef _unicode_representer(dumper, uni):\n node = yaml.ScalarNode(tag=u'tag:yaml.org,2002:str', value=uni)\n return node\n\n\nclass _IndentDumper(yaml.Dumper):\n def increase_indent(self, flow=False, indentless=False):\n return super(_IndentDumper, self).increase_indent(flow, False)\n\nyaml.add_representer(_MetaYaml, _represent_omap)\nif PY3:\n yaml.add_representer(str, _unicode_representer)\n unicode = None # silence pyflakes about unicode not existing in py3\nelse:\n yaml.add_representer(unicode, _unicode_representer)\n\n\ndef output_yaml(metadata, filename=None):\n output = yaml.dump(_MetaYaml(metadata.meta), Dumper=_IndentDumper,\n default_flow_style=False, indent=4)\n if filename:\n with open(filename, \"w\") as f:\n f.write(output)\n return \"Wrote yaml to %s\" % filename\n else:\n return output\n", "path": "conda_build/render.py"}]} | 2,792 | 141 |
gh_patches_debug_35455 | rasdani/github-patches | git_diff | DataDog__dd-agent-773 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support for varnish instances by name
We have several varnish instances on one server. Each has a name and can be referenced with `-n name` via the command line tools.
```
-n Specifies the name of the varnishd instance to get logs from. If -n is not specified, the host name is used.
```
(from https://www.varnish-cache.org/docs/3.0/reference/varnishstat.html)
It would be nice if the name could be specified in the yaml and the check would use it automatically. I've tried adding `-n name` to the bin location but that fails.
I'll probably try to add this later when I have a chance, unless someone else can do it quickly.
</issue>
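For reference, each entry under `instances:` in the check's YAML config reaches `check()` as a plain dict. A hypothetical instance carrying the requested per-instance name could look like the following; the `name` key is the proposed addition, not something the shipped check reads yet.
```python
# Hypothetical instance dict as it would be passed to Varnish.check();
# the 'name' key is the feature requested above.
instance = {
    'varnishstat': '/usr/bin/varnishstat',
    'name': 'frontend',            # would translate to `varnishstat -n frontend`
    'tags': ['env:prod'],
}
```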
<code>
[start of checks.d/varnish.py]
1 import xml.parsers.expat # python 2.4 compatible
2 import re
3 import subprocess
4
5 from checks import AgentCheck
6
7 class Varnish(AgentCheck):
8 # XML parsing bits, a.k.a. Kafka in Code
9 def _reset(self):
10 self._current_element = ""
11 self._current_metric = "varnish"
12 self._current_value = 0
13 self._current_str = ""
14 self._current_type = ""
15
16 def _start_element(self, name, attrs):
17 self._current_element = name
18
19 def _end_element(self, name):
20 if name == "stat":
21 m_name = self.normalize(self._current_metric)
22 if self._current_type in ("a", "c"):
23 self.rate(m_name, long(self._current_value))
24 elif self._current_type in ("i", "g"):
25 self.gauge(m_name, long(self._current_value))
26 else:
27 # Unsupported data type, ignore
28 self._reset()
29 return # don't save
30
31 # reset for next stat element
32 self._reset()
33 elif name in ("type", "ident", "name"):
34 self._current_metric += "." + self._current_str
35
36 def _char_data(self, data):
37 self.log.debug("Data %s [%s]" % (data, self._current_element))
38 data = data.strip()
39 if len(data) > 0 and self._current_element != "":
40 if self._current_element == "value":
41 self._current_value = long(data)
42 elif self._current_element == "flag":
43 self._current_type = data
44 else:
45 self._current_str = data
46
47 def check(self, instance):
48 """Extract stats from varnishstat -x
49
50 The text option (-1) is not reliable enough when counters get large.
51 VBE.media_video_prd_services_01(10.93.67.16,,8080).happy18446744073709551615
52
53 2 types of data, "a" for counter ("c" in newer versions of varnish), "i" for gauge ("g")
54 https://github.com/varnish/Varnish-Cache/blob/master/include/tbl/vsc_fields.h
55
56 Bitmaps are not supported.
57
58 <varnishstat>
59 <stat>
60 <name>fetch_304</name>
61 <value>0</value>
62 <flag>a</flag>
63 <description>Fetch no body (304)</description>
64 </stat>
65 <stat>
66 <name>n_sess_mem</name>
67 <value>334</value>
68 <flag>i</flag>
69 <description>N struct sess_mem</description>
70 </stat>
71 <stat>
72 <type>LCK</type>
73 <ident>vcl</ident>
74 <name>creat</name>
75 <value>1</value>
76 <flag>a</flag>
77 <description>Created locks</description>
78 </stat>
79 </varnishstat>
80 """
81 # Not configured? Not a problem.
82 if instance.get("varnishstat", None) is None:
83 raise Exception("varnishstat is not configured")
84 tags = instance.get('tags', [])
85
86 # Get the varnish version from varnishstat
87 output, error = subprocess.Popen([instance.get("varnishstat"), "-V"],
88 stdout=subprocess.PIPE,
89 stderr=subprocess.PIPE).communicate()
90
91 # Assumptions regarding varnish's version
92 use_xml = True
93 arg = "-x" # varnishstat argument
94 version = 3
95
96 m1 = re.search(r"varnish-(\d+)", output, re.MULTILINE)
97 # v2 prints the version on stderr, v3 on stdout
98 m2 = re.search(r"varnish-(\d+)", error, re.MULTILINE)
99
100 if m1 is None and m2 is None:
101 self.log.warn("Cannot determine the version of varnishstat, assuming 3 or greater")
102 self.warning("Cannot determine the version of varnishstat, assuming 3 or greater")
103 else:
104 if m1 is not None:
105 version = int(m1.group(1))
106 elif m2 is not None:
107 version = int(m2.group(1))
108
109 self.log.debug("Varnish version: %d" % version)
110
111 # Location of varnishstat
112 if version <= 2:
113 use_xml = False
114 arg = "-1"
115
116 output, error = subprocess.Popen([instance.get("varnishstat"), arg],
117 stdout=subprocess.PIPE,
118 stderr=subprocess.PIPE).communicate()
119 if error and len(error) > 0:
120 self.log.error(error)
121 self._parse_varnishstat(output, use_xml, tags)
122
123 def _parse_varnishstat(self, output, use_xml, tags=None):
124 tags = tags or []
125 if use_xml:
126 p = xml.parsers.expat.ParserCreate()
127 p.StartElementHandler = self._start_element
128 p.EndElementHandler = self._end_element
129 p.CharacterDataHandler = self._char_data
130 self._reset()
131 p.Parse(output, True)
132 else:
133 for line in output.split("\n"):
134 self.log.debug("Parsing varnish results: %s" % line)
135 fields = line.split()
136 if len(fields) < 3:
137 break
138 name, gauge_val, rate_val = fields[0], fields[1], fields[2]
139 metric_name = self.normalize(name, prefix="varnish")
140
141 # Now figure out which value to pick
142 if rate_val.lower() in ("nan", "."):
143 # col 2 matters
144 self.log.debug("Varnish (gauge) %s %d" % (metric_name, int(gauge_val)))
145 self.gauge(metric_name, int(gauge_val))
146 else:
147 # col 3 has a rate (since restart)
148 self.log.debug("Varnish (rate) %s %d" % (metric_name, int(gauge_val)))
149 self.rate(metric_name, float(gauge_val))
150
151 @staticmethod
152 def parse_agent_config(agentConfig):
153 if not agentConfig.get('varnishstat'):
154 return False
155
156 return {
157 'instances': [{'varnishstat': agentConfig.get('varnishstat')}]
158 }
159
[end of checks.d/varnish.py]
</code>
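A minimal sketch of how the varnishstat invocation could forward a configured instance name via `-n`, and tag the resulting metrics accordingly (this parallels the reference diff that follows; error handling is omitted):
```python
# Sketch only: assemble the varnishstat command for one configured instance.
def build_varnishstat_cmd(instance, arg='-x'):
    cmd = [instance['varnishstat'], arg]
    tags = list(instance.get('tags', []))
    name = instance.get('name')
    if name is not None:
        cmd.extend(['-n', name])
        tags.append('varnish_name:%s' % name)
    else:
        tags.append('varnish_name:default')
    return cmd, tags

print(build_varnishstat_cmd({'varnishstat': '/usr/bin/varnishstat', 'name': 'frontend'}))
```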
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checks.d/varnish.py b/checks.d/varnish.py
--- a/checks.d/varnish.py
+++ b/checks.d/varnish.py
@@ -82,6 +82,7 @@
if instance.get("varnishstat", None) is None:
raise Exception("varnishstat is not configured")
tags = instance.get('tags', [])
+ name = instance.get('name')
# Get the varnish version from varnishstat
output, error = subprocess.Popen([instance.get("varnishstat"), "-V"],
@@ -113,9 +114,19 @@
use_xml = False
arg = "-1"
- output, error = subprocess.Popen([instance.get("varnishstat"), arg],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE).communicate()
+ cmd = [instance.get("varnishstat"), arg]
+ if name is not None:
+ cmd.extend(['-n', name])
+ tags += [u'varnish_name:%s' % name]
+ else:
+ tags += [u'varnish_name:default']
+ try:
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ output, error = proc.communicate()
+ except Exception:
+ self.log.error(u"Failed to run %s" % repr(cmd))
+ raise
if error and len(error) > 0:
self.log.error(error)
self._parse_varnishstat(output, use_xml, tags)
@@ -142,11 +153,11 @@
if rate_val.lower() in ("nan", "."):
# col 2 matters
self.log.debug("Varnish (gauge) %s %d" % (metric_name, int(gauge_val)))
- self.gauge(metric_name, int(gauge_val))
+ self.gauge(metric_name, int(gauge_val), tags=tags)
else:
# col 3 has a rate (since restart)
self.log.debug("Varnish (rate) %s %d" % (metric_name, int(gauge_val)))
- self.rate(metric_name, float(gauge_val))
+ self.rate(metric_name, float(gauge_val), tags=tags)
@staticmethod
def parse_agent_config(agentConfig):
| {"golden_diff": "diff --git a/checks.d/varnish.py b/checks.d/varnish.py\n--- a/checks.d/varnish.py\n+++ b/checks.d/varnish.py\n@@ -82,6 +82,7 @@\n if instance.get(\"varnishstat\", None) is None:\n raise Exception(\"varnishstat is not configured\")\n tags = instance.get('tags', [])\n+ name = instance.get('name')\n \n # Get the varnish version from varnishstat\n output, error = subprocess.Popen([instance.get(\"varnishstat\"), \"-V\"],\n@@ -113,9 +114,19 @@\n use_xml = False\n arg = \"-1\"\n \n- output, error = subprocess.Popen([instance.get(\"varnishstat\"), arg],\n- stdout=subprocess.PIPE,\n- stderr=subprocess.PIPE).communicate()\n+ cmd = [instance.get(\"varnishstat\"), arg]\n+ if name is not None:\n+ cmd.extend(['-n', name])\n+ tags += [u'varnish_name:%s' % name]\n+ else:\n+ tags += [u'varnish_name:default']\n+ try:\n+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,\n+ stderr=subprocess.PIPE)\n+ output, error = proc.communicate()\n+ except Exception:\n+ self.log.error(u\"Failed to run %s\" % repr(cmd))\n+ raise\n if error and len(error) > 0:\n self.log.error(error)\n self._parse_varnishstat(output, use_xml, tags)\n@@ -142,11 +153,11 @@\n if rate_val.lower() in (\"nan\", \".\"):\n # col 2 matters\n self.log.debug(\"Varnish (gauge) %s %d\" % (metric_name, int(gauge_val)))\n- self.gauge(metric_name, int(gauge_val))\n+ self.gauge(metric_name, int(gauge_val), tags=tags)\n else:\n # col 3 has a rate (since restart)\n self.log.debug(\"Varnish (rate) %s %d\" % (metric_name, int(gauge_val)))\n- self.rate(metric_name, float(gauge_val))\n+ self.rate(metric_name, float(gauge_val), tags=tags)\n \n @staticmethod\n def parse_agent_config(agentConfig):\n", "issue": "Support for varnish instances by name\nWe have several varnish instances on one server. Each has a name and can be referenced with `-n name` via the command line tools.\n\n```\n-n Specifies the name of the varnishd instance to get logs from. If -n is not specified, the host name is used.\n```\n\n(from https://www.varnish-cache.org/docs/3.0/reference/varnishstat.html)\n\nIt would be nice if the name could be specified in the yaml and the check would use it automatically. I've tried adding `-n name` to the bin location but that fails.\n\nI'll probably try to add this later when I have a chance, unless someone else can do it quickly.\n\n<!---\n@huboard:{\"order\":770.0}\n-->\n\n", "before_files": [{"content": "import xml.parsers.expat # python 2.4 compatible\nimport re\nimport subprocess\n\nfrom checks import AgentCheck\n\nclass Varnish(AgentCheck):\n # XML parsing bits, a.k.a. 
Kafka in Code\n def _reset(self):\n self._current_element = \"\"\n self._current_metric = \"varnish\"\n self._current_value = 0\n self._current_str = \"\"\n self._current_type = \"\"\n\n def _start_element(self, name, attrs):\n self._current_element = name\n\n def _end_element(self, name):\n if name == \"stat\":\n m_name = self.normalize(self._current_metric)\n if self._current_type in (\"a\", \"c\"):\n self.rate(m_name, long(self._current_value))\n elif self._current_type in (\"i\", \"g\"):\n self.gauge(m_name, long(self._current_value))\n else:\n # Unsupported data type, ignore\n self._reset()\n return # don't save\n\n # reset for next stat element\n self._reset()\n elif name in (\"type\", \"ident\", \"name\"):\n self._current_metric += \".\" + self._current_str\n\n def _char_data(self, data):\n self.log.debug(\"Data %s [%s]\" % (data, self._current_element))\n data = data.strip()\n if len(data) > 0 and self._current_element != \"\":\n if self._current_element == \"value\":\n self._current_value = long(data)\n elif self._current_element == \"flag\":\n self._current_type = data\n else:\n self._current_str = data\n\n def check(self, instance):\n \"\"\"Extract stats from varnishstat -x\n\n The text option (-1) is not reliable enough when counters get large.\n VBE.media_video_prd_services_01(10.93.67.16,,8080).happy18446744073709551615\n\n 2 types of data, \"a\" for counter (\"c\" in newer versions of varnish), \"i\" for gauge (\"g\")\n https://github.com/varnish/Varnish-Cache/blob/master/include/tbl/vsc_fields.h\n\n Bitmaps are not supported.\n\n <varnishstat>\n <stat>\n <name>fetch_304</name>\n <value>0</value>\n <flag>a</flag>\n <description>Fetch no body (304)</description>\n </stat>\n <stat>\n <name>n_sess_mem</name>\n <value>334</value>\n <flag>i</flag>\n <description>N struct sess_mem</description>\n </stat>\n <stat>\n <type>LCK</type>\n <ident>vcl</ident>\n <name>creat</name>\n <value>1</value>\n <flag>a</flag>\n <description>Created locks</description>\n </stat>\n </varnishstat>\n \"\"\"\n # Not configured? 
Not a problem.\n if instance.get(\"varnishstat\", None) is None:\n raise Exception(\"varnishstat is not configured\")\n tags = instance.get('tags', [])\n\n # Get the varnish version from varnishstat\n output, error = subprocess.Popen([instance.get(\"varnishstat\"), \"-V\"],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE).communicate()\n\n # Assumptions regarding varnish's version\n use_xml = True\n arg = \"-x\" # varnishstat argument\n version = 3\n\n m1 = re.search(r\"varnish-(\\d+)\", output, re.MULTILINE)\n # v2 prints the version on stderr, v3 on stdout\n m2 = re.search(r\"varnish-(\\d+)\", error, re.MULTILINE)\n\n if m1 is None and m2 is None:\n self.log.warn(\"Cannot determine the version of varnishstat, assuming 3 or greater\")\n self.warning(\"Cannot determine the version of varnishstat, assuming 3 or greater\")\n else:\n if m1 is not None:\n version = int(m1.group(1))\n elif m2 is not None:\n version = int(m2.group(1))\n\n self.log.debug(\"Varnish version: %d\" % version)\n\n # Location of varnishstat\n if version <= 2:\n use_xml = False\n arg = \"-1\"\n\n output, error = subprocess.Popen([instance.get(\"varnishstat\"), arg],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE).communicate()\n if error and len(error) > 0:\n self.log.error(error)\n self._parse_varnishstat(output, use_xml, tags)\n\n def _parse_varnishstat(self, output, use_xml, tags=None):\n tags = tags or []\n if use_xml:\n p = xml.parsers.expat.ParserCreate()\n p.StartElementHandler = self._start_element\n p.EndElementHandler = self._end_element\n p.CharacterDataHandler = self._char_data\n self._reset()\n p.Parse(output, True)\n else:\n for line in output.split(\"\\n\"):\n self.log.debug(\"Parsing varnish results: %s\" % line)\n fields = line.split()\n if len(fields) < 3:\n break\n name, gauge_val, rate_val = fields[0], fields[1], fields[2]\n metric_name = self.normalize(name, prefix=\"varnish\")\n\n # Now figure out which value to pick\n if rate_val.lower() in (\"nan\", \".\"):\n # col 2 matters\n self.log.debug(\"Varnish (gauge) %s %d\" % (metric_name, int(gauge_val)))\n self.gauge(metric_name, int(gauge_val))\n else:\n # col 3 has a rate (since restart)\n self.log.debug(\"Varnish (rate) %s %d\" % (metric_name, int(gauge_val)))\n self.rate(metric_name, float(gauge_val))\n\n @staticmethod\n def parse_agent_config(agentConfig):\n if not agentConfig.get('varnishstat'):\n return False\n\n return {\n 'instances': [{'varnishstat': agentConfig.get('varnishstat')}]\n }\n", "path": "checks.d/varnish.py"}]} | 2,453 | 511 |
gh_patches_debug_13296 | rasdani/github-patches | git_diff | qtile__qtile-1687 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ampersands need to be escaped in WindowName widget
# Issue description
Ampersands in window names need to be changed to "&amp;" in the WindowName widget before being passed to Pango/Cairo
# Qtile version
0.15.1
# Stack traces
Exception: parse_markup() failed for b'Seth Lakeman - King & Country'
2020-04-27 19:12:00,744 ERROR libqtile hook.py:fire():L373 Error in hook focus_change
Traceback (most recent call last):
File "/usr/lib/python3.8/site-packages/libqtile/hook.py", line 371, in fire
i(*args, **kwargs)
File "/usr/lib/python3.8/site-packages/libqtile/widget/windowname.py", line 67, in update
self.text = "%s%s" % (state, w.name if w and w.name else " ")
File "/usr/lib/python3.8/site-packages/libqtile/widget/base.py", line 323, in text
self.layout.text = self.formatted_text
File "/usr/lib/python3.8/site-packages/libqtile/drawer.py", line 70, in text
attrlist, value, accel_char = pangocffi.parse_markup(value)
File "/usr/lib/python3.8/site-packages/libqtile/pangocffi.py", line 173, in parse_markup
raise Exception("parse_markup() failed for %s" % value)
Exception: parse_markup() failed for b'Seth Lakeman - King & Country'
# Configuration
N/A
</issue>
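The failure comes from handing a raw window title to Pango's markup parser, where a bare `&` is invalid; it must be escaped to `&amp;` (and `<`/`>` likewise) before layout. Below is a stand-alone illustration using only the standard library; qtile's actual fix may go through its own pango bindings instead.
```python
# Stand-alone illustration of escaping a window title for a markup parser.
from xml.sax.saxutils import escape

title = "Seth Lakeman - King & Country"
safe_title = escape(title)      # '&' -> '&amp;', '<' -> '&lt;', '>' -> '&gt;'
print(safe_title)               # Seth Lakeman - King &amp; Country
```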
<code>
[start of libqtile/widget/windowname.py]
1 # Copyright (c) 2008, 2010 Aldo Cortesi
2 # Copyright (c) 2010 matt
3 # Copyright (c) 2011 Mounier Florian
4 # Copyright (c) 2012 Tim Neumann
5 # Copyright (c) 2013 Craig Barnes
6 # Copyright (c) 2014 Sean Vig
7 # Copyright (c) 2014 Tycho Andersen
8 #
9 # Permission is hereby granted, free of charge, to any person obtaining a copy
10 # of this software and associated documentation files (the "Software"), to deal
11 # in the Software without restriction, including without limitation the rights
12 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
13 # copies of the Software, and to permit persons to whom the Software is
14 # furnished to do so, subject to the following conditions:
15 #
16 # The above copyright notice and this permission notice shall be included in
17 # all copies or substantial portions of the Software.
18 #
19 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
22 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
24 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
25 # SOFTWARE.
26
27 from libqtile import bar, hook
28 from libqtile.widget import base
29
30
31 class WindowName(base._TextBox):
32 """Displays the name of the window that currently has focus"""
33 orientations = base.ORIENTATION_HORIZONTAL
34 defaults = [
35 ('show_state', True, 'show window status before window name'),
36 ('for_current_screen', False, 'instead of this bars screen use currently active screen')
37 ]
38
39 def __init__(self, width=bar.STRETCH, **config):
40 base._TextBox.__init__(self, width=width, **config)
41 self.add_defaults(WindowName.defaults)
42
43 def _configure(self, qtile, bar):
44 base._TextBox._configure(self, qtile, bar)
45 hook.subscribe.client_name_updated(self.update)
46 hook.subscribe.focus_change(self.update)
47 hook.subscribe.float_change(self.update)
48
49 @hook.subscribe.current_screen_change
50 def on_screen_changed():
51 if self.for_current_screen:
52 self.update()
53
54 def update(self, *args):
55 if self.for_current_screen:
56 w = self.qtile.current_screen.group.current_window
57 else:
58 w = self.bar.screen.group.current_window
59 state = ''
60 if self.show_state and w is not None:
61 if w.maximized:
62 state = '[] '
63 elif w.minimized:
64 state = '_ '
65 elif w.floating:
66 state = 'V '
67 self.text = "%s%s" % (state, w.name if w and w.name else " ")
68 self.bar.draw()
69
[end of libqtile/widget/windowname.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libqtile/widget/windowname.py b/libqtile/widget/windowname.py
--- a/libqtile/widget/windowname.py
+++ b/libqtile/widget/windowname.py
@@ -24,7 +24,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
-from libqtile import bar, hook
+from libqtile import bar, hook, pangocffi
from libqtile.widget import base
@@ -64,5 +64,6 @@
state = '_ '
elif w.floating:
state = 'V '
- self.text = "%s%s" % (state, w.name if w and w.name else " ")
+ unescaped = "%s%s" % (state, w.name if w and w.name else " ")
+ self.text = pangocffi.markup_escape_text(unescaped)
self.bar.draw()
| {"golden_diff": "diff --git a/libqtile/widget/windowname.py b/libqtile/widget/windowname.py\n--- a/libqtile/widget/windowname.py\n+++ b/libqtile/widget/windowname.py\n@@ -24,7 +24,7 @@\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n \n-from libqtile import bar, hook\n+from libqtile import bar, hook, pangocffi\n from libqtile.widget import base\n \n \n@@ -64,5 +64,6 @@\n state = '_ '\n elif w.floating:\n state = 'V '\n- self.text = \"%s%s\" % (state, w.name if w and w.name else \" \")\n+ unescaped = \"%s%s\" % (state, w.name if w and w.name else \" \")\n+ self.text = pangocffi.markup_escape_text(unescaped)\n self.bar.draw()\n", "issue": "Ampersands need to be escaped in WindowName widget\n# Issue description\r\nAmpersands in window names need to be changed in WindowName widget to \"&\" before being passed to Pango/Cairo\r\n\r\n# Qtile version\r\n0.15.1\r\n\r\n# Stack traces\r\nException: parse_markup() failed for b'Seth Lakeman - King & Country'\r\n2020-04-27 19:12:00,744 ERROR libqtile hook.py:fire():L373 Error in hook focus_change\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.8/site-packages/libqtile/hook.py\", line 371, in fire\r\n i(*args, **kwargs)\r\n File \"/usr/lib/python3.8/site-packages/libqtile/widget/windowname.py\", line 67, in update\r\n self.text = \"%s%s\" % (state, w.name if w and w.name else \" \")\r\n File \"/usr/lib/python3.8/site-packages/libqtile/widget/base.py\", line 323, in text\r\n self.layout.text = self.formatted_text\r\n File \"/usr/lib/python3.8/site-packages/libqtile/drawer.py\", line 70, in text\r\n attrlist, value, accel_char = pangocffi.parse_markup(value)\r\n File \"/usr/lib/python3.8/site-packages/libqtile/pangocffi.py\", line 173, in parse_markup\r\n raise Exception(\"parse_markup() failed for %s\" % value)\r\nException: parse_markup() failed for b'Seth Lakeman - King & Country'\r\n\r\n# Configuration\r\nN/A\n", "before_files": [{"content": "# Copyright (c) 2008, 2010 Aldo Cortesi\n# Copyright (c) 2010 matt\n# Copyright (c) 2011 Mounier Florian\n# Copyright (c) 2012 Tim Neumann\n# Copyright (c) 2013 Craig Barnes\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2014 Tycho Andersen\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom libqtile import bar, hook\nfrom libqtile.widget import base\n\n\nclass WindowName(base._TextBox):\n \"\"\"Displays the name of the window that currently has focus\"\"\"\n orientations = base.ORIENTATION_HORIZONTAL\n defaults = [\n ('show_state', True, 'show window status before window name'),\n ('for_current_screen', False, 'instead of this bars screen use currently active screen')\n ]\n\n def __init__(self, width=bar.STRETCH, **config):\n base._TextBox.__init__(self, width=width, **config)\n self.add_defaults(WindowName.defaults)\n\n def _configure(self, qtile, bar):\n base._TextBox._configure(self, qtile, bar)\n hook.subscribe.client_name_updated(self.update)\n hook.subscribe.focus_change(self.update)\n hook.subscribe.float_change(self.update)\n\n @hook.subscribe.current_screen_change\n def on_screen_changed():\n if self.for_current_screen:\n self.update()\n\n def update(self, *args):\n if self.for_current_screen:\n w = self.qtile.current_screen.group.current_window\n else:\n w = self.bar.screen.group.current_window\n state = ''\n if self.show_state and w is not None:\n if w.maximized:\n state = '[] '\n elif w.minimized:\n state = '_ '\n elif w.floating:\n state = 'V '\n self.text = \"%s%s\" % (state, w.name if w and w.name else \" \")\n self.bar.draw()\n", "path": "libqtile/widget/windowname.py"}]} | 1,673 | 201 |
gh_patches_debug_10207 | rasdani/github-patches | git_diff | certbot__certbot-2248 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
VersionConflict: ConfigArgParse 0.9.3
Hello,
I tried renewing our certificates today, and I got an error from letsencrypt-auto. The error message was:
VersionConflict: (ConfigArgParse 0.9.3 (/home/bogdanb/.local/share/letsencrypt/lib/python2.7/site-packages), Requirement.parse('ConfigArgParse>=0.10.0'))
Please see the two attached files for the logs:
[execution.log.txt](https://github.com/letsencrypt/letsencrypt/files/97381/execution.log.txt)
[letencrypt.log.txt](https://github.com/letsencrypt/letsencrypt/files/97383/letencrypt.log.txt)
This is after I had deleted the ~/.local folder where letsencrypt stores its downloads. (I had the same exception before.) This is on an Ubuntu 15.04 machine:
$ uname -a
Linux qotilabs-dime 2.6.32-042stab112.15 #1 SMP Tue Oct 20 17:22:56 MSK 2015 x86_64 x86_64 x86_64 GNU/Linux
</issue>
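The `VersionConflict` above comes from setuptools' `pkg_resources`, which checks the installed distributions against the requirements a package declares. As a rough illustration only (not taken from the report), the same exception can be reproduced in any environment that still has ConfigArgParse 0.9.3 installed:

```
import pkg_resources

try:
    pkg_resources.require("ConfigArgParse>=0.10.0")
except pkg_resources.VersionConflict as exc:
    # e.g. (ConfigArgParse 0.9.3 (...), Requirement.parse('ConfigArgParse>=0.10.0'))
    print(exc)
```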
<code>
[start of setup.py]
1 import codecs
2 import os
3 import re
4 import sys
5
6 from setuptools import setup
7 from setuptools import find_packages
8
9 # Workaround for http://bugs.python.org/issue8876, see
10 # http://bugs.python.org/issue8876#msg208792
11 # This can be removed when using Python 2.7.9 or later:
12 # https://hg.python.org/cpython/raw-file/v2.7.9/Misc/NEWS
13 if os.path.abspath(__file__).split(os.path.sep)[1] == 'vagrant':
14 del os.link
15
16
17 def read_file(filename, encoding='utf8'):
18 """Read unicode from given file."""
19 with codecs.open(filename, encoding=encoding) as fd:
20 return fd.read()
21
22
23 here = os.path.abspath(os.path.dirname(__file__))
24
25 # read version number (and other metadata) from package init
26 init_fn = os.path.join(here, 'letsencrypt', '__init__.py')
27 meta = dict(re.findall(r"""__([a-z]+)__ = '([^']+)""", read_file(init_fn)))
28
29 readme = read_file(os.path.join(here, 'README.rst'))
30 changes = read_file(os.path.join(here, 'CHANGES.rst'))
31 version = meta['version']
32
33 # Please update tox.ini when modifying dependency version requirements
34 install_requires = [
35 'acme=={0}'.format(version),
36 'ConfigArgParse>=0.10.0', # python2.6 support, upstream #17
37 'configobj',
38 'cryptography>=0.7', # load_pem_x509_certificate
39 'parsedatetime',
40 'psutil>=2.1.0', # net_connections introduced in 2.1.0
41 'PyOpenSSL',
42 'pyrfc3339',
43 'python2-pythondialog>=3.2.2rc1', # Debian squeeze support, cf. #280
44 'pytz',
45 'setuptools', # pkg_resources
46 'six',
47 'zope.component',
48 'zope.interface',
49 ]
50
51 # env markers in extras_require cause problems with older pip: #517
52 # Keep in sync with conditional_requirements.py.
53 if sys.version_info < (2, 7):
54 install_requires.extend([
55 # only some distros recognize stdlib argparse as already satisfying
56 'argparse',
57 'mock<1.1.0',
58 ])
59 else:
60 install_requires.append('mock')
61
62 dev_extras = [
63 # Pin astroid==1.3.5, pylint==1.4.2 as a workaround for #289
64 'astroid==1.3.5',
65 'pylint==1.4.2', # upstream #248
66 'twine',
67 'wheel',
68 ]
69
70 docs_extras = [
71 'repoze.sphinx.autointerface',
72 'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags
73 'sphinx_rtd_theme',
74 'sphinxcontrib-programoutput',
75 ]
76
77 testing_extras = [
78 'coverage',
79 'nose',
80 'nosexcover',
81 'pep8',
82 'tox',
83 ]
84
85 setup(
86 name='letsencrypt',
87 version=version,
88 description="Let's Encrypt client",
89 long_description=readme, # later: + '\n\n' + changes
90 url='https://github.com/letsencrypt/letsencrypt',
91 author="Let's Encrypt Project",
92 author_email='[email protected]',
93 license='Apache License 2.0',
94 classifiers=[
95 'Development Status :: 3 - Alpha',
96 'Environment :: Console',
97 'Environment :: Console :: Curses',
98 'Intended Audience :: System Administrators',
99 'License :: OSI Approved :: Apache Software License',
100 'Operating System :: POSIX :: Linux',
101 'Programming Language :: Python',
102 'Programming Language :: Python :: 2',
103 'Programming Language :: Python :: 2.6',
104 'Programming Language :: Python :: 2.7',
105 'Topic :: Internet :: WWW/HTTP',
106 'Topic :: Security',
107 'Topic :: System :: Installation/Setup',
108 'Topic :: System :: Networking',
109 'Topic :: System :: Systems Administration',
110 'Topic :: Utilities',
111 ],
112
113 packages=find_packages(exclude=['docs', 'examples', 'tests', 'venv']),
114 include_package_data=True,
115
116 install_requires=install_requires,
117 extras_require={
118 'dev': dev_extras,
119 'docs': docs_extras,
120 'testing': testing_extras,
121 },
122
123 # to test all packages run "python setup.py test -s
124 # {acme,letsencrypt_apache,letsencrypt_nginx}"
125 test_suite='letsencrypt',
126
127 entry_points={
128 'console_scripts': [
129 'letsencrypt = letsencrypt.cli:main',
130 'letsencrypt-renewer = letsencrypt.renewer:main',
131 ],
132 'letsencrypt.plugins': [
133 'manual = letsencrypt.plugins.manual:Authenticator',
134 'null = letsencrypt.plugins.null:Installer',
135 'standalone = letsencrypt.plugins.standalone:Authenticator',
136 'webroot = letsencrypt.plugins.webroot:Authenticator',
137 ],
138 },
139 )
140
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -33,7 +33,10 @@
# Please update tox.ini when modifying dependency version requirements
install_requires = [
'acme=={0}'.format(version),
- 'ConfigArgParse>=0.10.0', # python2.6 support, upstream #17
+ # We technically need ConfigArgParse 0.10.0 for Python 2.6 support, but
+ # saying so here causes a runtime error against our temporary fork of 0.9.3
+ # in which we added 2.6 support (see #2243), so we relax the requirement.
+ 'ConfigArgParse>=0.9.3',
'configobj',
'cryptography>=0.7', # load_pem_x509_certificate
'parsedatetime',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,7 +33,10 @@\n # Please update tox.ini when modifying dependency version requirements\n install_requires = [\n 'acme=={0}'.format(version),\n- 'ConfigArgParse>=0.10.0', # python2.6 support, upstream #17\n+ # We technically need ConfigArgParse 0.10.0 for Python 2.6 support, but\n+ # saying so here causes a runtime error against our temporary fork of 0.9.3\n+ # in which we added 2.6 support (see #2243), so we relax the requirement.\n+ 'ConfigArgParse>=0.9.3',\n 'configobj',\n 'cryptography>=0.7', # load_pem_x509_certificate\n 'parsedatetime',\n", "issue": "VersionConflict: ConfigArgParse 0.9.3\nHello,\n\nI tried renewing our certificates today, and I got an error from letsencrypt-auto. The error message was:\n\nVersionConflict: (ConfigArgParse 0.9.3 (/home/bogdanb/.local/share/letsencrypt/lib/python2.7/site-packages), Requirement.parse('ConfigArgParse>=0.10.0'))\n\nPlease see the two attached files for the logs:\n[execution.log.txt](https://github.com/letsencrypt/letsencrypt/files/97381/execution.log.txt)\n[letencrypt.log.txt](https://github.com/letsencrypt/letsencrypt/files/97383/letencrypt.log.txt)\n\nThis is after I had deleted the ~/.local folder where letsencrypt stores its downloads. (I had the same exception before.) This is on an Ubuntu 15.04 machine:\n\n$ uname -a\nLinux qotilabs-dime 2.6.32-042stab112.15 #1 SMP Tue Oct 20 17:22:56 MSK 2015 x86_64 x86_64 x86_64 GNU/Linux\n\n", "before_files": [{"content": "import codecs\nimport os\nimport re\nimport sys\n\nfrom setuptools import setup\nfrom setuptools import find_packages\n\n# Workaround for http://bugs.python.org/issue8876, see\n# http://bugs.python.org/issue8876#msg208792\n# This can be removed when using Python 2.7.9 or later:\n# https://hg.python.org/cpython/raw-file/v2.7.9/Misc/NEWS\nif os.path.abspath(__file__).split(os.path.sep)[1] == 'vagrant':\n del os.link\n\n\ndef read_file(filename, encoding='utf8'):\n \"\"\"Read unicode from given file.\"\"\"\n with codecs.open(filename, encoding=encoding) as fd:\n return fd.read()\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n# read version number (and other metadata) from package init\ninit_fn = os.path.join(here, 'letsencrypt', '__init__.py')\nmeta = dict(re.findall(r\"\"\"__([a-z]+)__ = '([^']+)\"\"\", read_file(init_fn)))\n\nreadme = read_file(os.path.join(here, 'README.rst'))\nchanges = read_file(os.path.join(here, 'CHANGES.rst'))\nversion = meta['version']\n\n# Please update tox.ini when modifying dependency version requirements\ninstall_requires = [\n 'acme=={0}'.format(version),\n 'ConfigArgParse>=0.10.0', # python2.6 support, upstream #17\n 'configobj',\n 'cryptography>=0.7', # load_pem_x509_certificate\n 'parsedatetime',\n 'psutil>=2.1.0', # net_connections introduced in 2.1.0\n 'PyOpenSSL',\n 'pyrfc3339',\n 'python2-pythondialog>=3.2.2rc1', # Debian squeeze support, cf. 
#280\n 'pytz',\n 'setuptools', # pkg_resources\n 'six',\n 'zope.component',\n 'zope.interface',\n]\n\n# env markers in extras_require cause problems with older pip: #517\n# Keep in sync with conditional_requirements.py.\nif sys.version_info < (2, 7):\n install_requires.extend([\n # only some distros recognize stdlib argparse as already satisfying\n 'argparse',\n 'mock<1.1.0',\n ])\nelse:\n install_requires.append('mock')\n\ndev_extras = [\n # Pin astroid==1.3.5, pylint==1.4.2 as a workaround for #289\n 'astroid==1.3.5',\n 'pylint==1.4.2', # upstream #248\n 'twine',\n 'wheel',\n]\n\ndocs_extras = [\n 'repoze.sphinx.autointerface',\n 'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags\n 'sphinx_rtd_theme',\n 'sphinxcontrib-programoutput',\n]\n\ntesting_extras = [\n 'coverage',\n 'nose',\n 'nosexcover',\n 'pep8',\n 'tox',\n]\n\nsetup(\n name='letsencrypt',\n version=version,\n description=\"Let's Encrypt client\",\n long_description=readme, # later: + '\\n\\n' + changes\n url='https://github.com/letsencrypt/letsencrypt',\n author=\"Let's Encrypt Project\",\n author_email='[email protected]',\n license='Apache License 2.0',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Environment :: Console :: Curses',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Security',\n 'Topic :: System :: Installation/Setup',\n 'Topic :: System :: Networking',\n 'Topic :: System :: Systems Administration',\n 'Topic :: Utilities',\n ],\n\n packages=find_packages(exclude=['docs', 'examples', 'tests', 'venv']),\n include_package_data=True,\n\n install_requires=install_requires,\n extras_require={\n 'dev': dev_extras,\n 'docs': docs_extras,\n 'testing': testing_extras,\n },\n\n # to test all packages run \"python setup.py test -s\n # {acme,letsencrypt_apache,letsencrypt_nginx}\"\n test_suite='letsencrypt',\n\n entry_points={\n 'console_scripts': [\n 'letsencrypt = letsencrypt.cli:main',\n 'letsencrypt-renewer = letsencrypt.renewer:main',\n ],\n 'letsencrypt.plugins': [\n 'manual = letsencrypt.plugins.manual:Authenticator',\n 'null = letsencrypt.plugins.null:Installer',\n 'standalone = letsencrypt.plugins.standalone:Authenticator',\n 'webroot = letsencrypt.plugins.webroot:Authenticator',\n ],\n },\n)\n", "path": "setup.py"}]} | 2,253 | 203 |
gh_patches_debug_8833 | rasdani/github-patches | git_diff | conan-io__conan-8912 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[bug] Conan client fails when server charset is specified.
I'm experimenting with implementing a conan server. While testing that server I ran into problems because it fully qualifies the "Content-Type" HTTP header as "application/json; charset=utf-8". The conan client then errors when talking to it with: `Response from remote is not json, but 'application/json; charset=utf-8'.`. JSON content requires UTF-8 encoding, which makes specifying the charset in the content type optional for the server. But regardless of whether the charset is utf-8 or ascii, clients need to handle a charset being specified, which the conan client doesn't appear to be doing.
### Environment Details (include every applicable attribute)
* Operating System+version: 5.11.14-1-MANJARO #1 SMP PREEMPT Wed Apr 14 08:25:29 UTC 2021 x86_64 GNU/Linux
* Compiler+version: NA
* Conan version: 1.35.1
* Python version: 3.9.3
</issue>
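HTTP allows parameters such as `charset` to follow the media type in a `Content-Type` header, so a tolerant client compares only the media type itself rather than the whole header value. A minimal sketch of that idea (the helper name is hypothetical, and this is not Conan's actual implementation):

```
def media_type(content_type_header):
    # "application/json; charset=utf-8" -> "application/json"
    return content_type_header.split(";", 1)[0].strip().lower()

assert media_type("application/json; charset=utf-8") == "application/json"
assert media_type("application/json") == "application/json"
```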
<code>
[start of conans/client/rest/rest_client_common.py]
1 import json
2
3 from requests.auth import AuthBase, HTTPBasicAuth
4
5 from conans.client.rest import response_to_str
6 from conans.errors import (EXCEPTION_CODE_MAPPING, ConanException,
7 AuthenticationException, RecipeNotFoundException,
8 PackageNotFoundException)
9 from conans.model.ref import ConanFileReference
10 from conans.util.files import decode_text
11 from conans.util.log import logger
12
13
14 class JWTAuth(AuthBase):
15 """Attaches JWT Authentication to the given Request object."""
16
17 def __init__(self, token):
18 self.token = token
19
20 def __call__(self, request):
21 if self.token:
22 request.headers['Authorization'] = "Bearer %s" % str(self.token)
23 return request
24
25
26 def get_exception_from_error(error_code):
27 tmp = {v: k for k, v in EXCEPTION_CODE_MAPPING.items() # All except NotFound
28 if k not in (RecipeNotFoundException, PackageNotFoundException)}
29 if error_code in tmp:
30 logger.debug("REST ERROR: %s" % str(tmp[error_code]))
31 return tmp[error_code]
32 else:
33 base_error = int(str(error_code)[0] + "00")
34 logger.debug("REST ERROR: %s" % str(base_error))
35 try:
36 return tmp[base_error]
37 except KeyError:
38 return None
39
40
41 def handle_return_deserializer(deserializer=None):
42 """Decorator for rest api methods.
43 Map exceptions and http return codes and deserialize if needed.
44
45 deserializer: Function for deserialize values"""
46
47 def handle_return(method):
48 def inner(*argc, **argv):
49 ret = method(*argc, **argv)
50 if ret.status_code != 200:
51 ret.charset = "utf-8" # To be able to access ret.text (ret.content are bytes)
52 text = ret.text if ret.status_code != 404 else "404 Not found"
53 raise get_exception_from_error(ret.status_code)(text)
54 return deserializer(ret.content) if deserializer else decode_text(ret.content)
55
56 return inner
57
58 return handle_return
59
60
61 class RestCommonMethods(object):
62
63 def __init__(self, remote_url, token, custom_headers, output, requester, config, verify_ssl,
64 artifacts_properties=None, matrix_params=False):
65 self.token = token
66 self.remote_url = remote_url
67 self.custom_headers = custom_headers
68 self._output = output
69 self.requester = requester
70 self._config = config
71 self.verify_ssl = verify_ssl
72 self._artifacts_properties = artifacts_properties
73 self._matrix_params = matrix_params
74
75 @property
76 def auth(self):
77 return JWTAuth(self.token)
78
79 @staticmethod
80 def _check_error_response(ret):
81 if ret.status_code == 401:
82 raise AuthenticationException("Wrong user or password")
83 # Cannot check content-type=text/html, conan server is doing it wrong
84 if not ret.ok or "html>" in str(ret.content):
85 raise ConanException("%s\n\nInvalid server response, check remote URL and "
86 "try again" % str(ret.content))
87
88 def authenticate(self, user, password):
89 """Sends user + password to get:
90 - A plain response with a regular token (not supported refresh in the remote) and None
91 """
92 auth = HTTPBasicAuth(user, password)
93 url = self.router.common_authenticate()
94 logger.debug("REST: Authenticate to get access_token: %s" % url)
95 ret = self.requester.get(url, auth=auth, headers=self.custom_headers,
96 verify=self.verify_ssl)
97
98 self._check_error_response(ret)
99 return decode_text(ret.content)
100
101 def authenticate_oauth(self, user, password):
102 """Sends user + password to get:
103 - A json with an access_token and a refresh token (if supported in the remote)
104 Artifactory >= 6.13.X
105 """
106 url = self.router.oauth_authenticate()
107 auth = HTTPBasicAuth(user, password)
108 headers = {}
109 headers.update(self.custom_headers)
110 headers["Content-type"] = "application/x-www-form-urlencoded"
111 logger.debug("REST: Authenticating with OAUTH: %s" % url)
112 ret = self.requester.post(url, auth=auth, headers=headers, verify=self.verify_ssl)
113 self._check_error_response(ret)
114
115 data = ret.json()
116 access_token = data["access_token"]
117 refresh_token = data["refresh_token"]
118 logger.debug("REST: Obtained refresh and access tokens")
119 return access_token, refresh_token
120
121 def refresh_token(self, token, refresh_token):
122 """Sends access_token and the refresh_token to get a pair of
123 access_token and refresh token
124
125 Artifactory >= 6.13.X
126 """
127 url = self.router.oauth_authenticate()
128 logger.debug("REST: Refreshing Token: %s" % url)
129 headers = {}
130 headers.update(self.custom_headers)
131 headers["Content-type"] = "application/x-www-form-urlencoded"
132 payload = {'access_token': token, 'refresh_token': refresh_token,
133 'grant_type': 'refresh_token'}
134 ret = self.requester.post(url, headers=headers, verify=self.verify_ssl, data=payload)
135 self._check_error_response(ret)
136
137 data = ret.json()
138 if "access_token" not in data:
139 logger.debug("REST: unexpected data from server: {}".format(data))
140 raise ConanException("Error refreshing the token")
141
142 new_access_token = data["access_token"]
143 new_refresh_token = data["refresh_token"]
144 logger.debug("REST: Obtained new refresh and access tokens")
145 return new_access_token, new_refresh_token
146
147 @handle_return_deserializer()
148 def check_credentials(self):
149 """If token is not valid will raise AuthenticationException.
150 User will be asked for new user/pass"""
151 url = self.router.common_check_credentials()
152 logger.debug("REST: Check credentials: %s" % url)
153 ret = self.requester.get(url, auth=self.auth, headers=self.custom_headers,
154 verify=self.verify_ssl)
155 return ret
156
157 def server_capabilities(self, user=None, password=None):
158 """Get information about the server: status, version, type and capabilities"""
159 url = self.router.ping()
160 logger.debug("REST: ping: %s" % url)
161 if user and password:
162 # This can happen in "conan user" cmd. Instead of empty token, use HttpBasic
163 auth = HTTPBasicAuth(user, password)
164 else:
165 auth = self.auth
166 ret = self.requester.get(url, auth=auth, headers=self.custom_headers, verify=self.verify_ssl)
167
168 server_capabilities = ret.headers.get('X-Conan-Server-Capabilities', "")
169 if not server_capabilities and not ret.ok:
170 # Old Artifactory might return 401/403 without capabilities, we don't want
171 # to cache them #5687, so raise the exception and force authentication
172 raise get_exception_from_error(ret.status_code)(response_to_str(ret))
173
174 return [cap.strip() for cap in server_capabilities.split(",") if cap]
175
176 def get_json(self, url, data=None, headers=None):
177 req_headers = self.custom_headers.copy()
178 req_headers.update(headers or {})
179 if data: # POST request
180 req_headers.update({'Content-type': 'application/json',
181 'Accept': 'application/json'})
182 logger.debug("REST: post: %s" % url)
183 response = self.requester.post(url, auth=self.auth, headers=req_headers,
184 verify=self.verify_ssl,
185 stream=True,
186 data=json.dumps(data))
187 else:
188 logger.debug("REST: get: %s" % url)
189 response = self.requester.get(url, auth=self.auth, headers=req_headers,
190 verify=self.verify_ssl,
191 stream=True)
192
193 if response.status_code != 200: # Error message is text
194 response.charset = "utf-8" # To be able to access ret.text (ret.content are bytes)
195 raise get_exception_from_error(response.status_code)(response_to_str(response))
196
197 content = decode_text(response.content)
198 content_type = response.headers.get("Content-Type")
199 if content_type != 'application/json':
200 raise ConanException("%s\n\nResponse from remote is not json, but '%s'"
201 % (content, content_type))
202
203 try: # This can fail, if some proxy returns 200 and an html message
204 result = json.loads(content)
205 except Exception:
206 raise ConanException("Remote responded with broken json: %s" % content)
207 if not isinstance(result, dict):
208 raise ConanException("Unexpected server response %s" % result)
209 return result
210
211 def upload_recipe(self, ref, files_to_upload, deleted, retry, retry_wait):
212 if files_to_upload:
213 self._upload_recipe(ref, files_to_upload, retry, retry_wait)
214 if deleted:
215 self._remove_conanfile_files(ref, deleted)
216
217 def get_recipe_snapshot(self, ref):
218 # this method is used only for UPLOADING, then it requires the credentials
219 # Check of credentials is done in the uploader
220 url = self.router.recipe_snapshot(ref)
221 snap = self._get_snapshot(url)
222 return snap
223
224 def get_package_snapshot(self, pref):
225 # this method is also used to check the integrity of the package upstream
226 # while installing, so check_credentials is done in uploader.
227 url = self.router.package_snapshot(pref)
228 snap = self._get_snapshot(url)
229 return snap
230
231 def upload_package(self, pref, files_to_upload, deleted, retry, retry_wait):
232 if files_to_upload:
233 self._upload_package(pref, files_to_upload, retry, retry_wait)
234 if deleted:
235 raise Exception("This shouldn't be happening, deleted files "
236 "in local package present in remote: %s.\n Please, report it at "
237 "https://github.com/conan-io/conan/issues " % str(deleted))
238
239 def search(self, pattern=None, ignorecase=True):
240 """
241 the_files: dict with relative_path: content
242 """
243 url = self.router.search(pattern, ignorecase)
244 response = self.get_json(url)["results"]
245 return [ConanFileReference.loads(reference) for reference in response]
246
247 def search_packages(self, ref, query):
248 """Client is filtering by the query"""
249 url = self.router.search_packages(ref, query)
250 package_infos = self.get_json(url)
251 return package_infos
252
[end of conans/client/rest/rest_client_common.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conans/client/rest/rest_client_common.py b/conans/client/rest/rest_client_common.py
--- a/conans/client/rest/rest_client_common.py
+++ b/conans/client/rest/rest_client_common.py
@@ -196,7 +196,7 @@
content = decode_text(response.content)
content_type = response.headers.get("Content-Type")
- if content_type != 'application/json':
+ if content_type != 'application/json' and content_type != 'application/json; charset=utf-8':
raise ConanException("%s\n\nResponse from remote is not json, but '%s'"
% (content, content_type))
| {"golden_diff": "diff --git a/conans/client/rest/rest_client_common.py b/conans/client/rest/rest_client_common.py\n--- a/conans/client/rest/rest_client_common.py\n+++ b/conans/client/rest/rest_client_common.py\n@@ -196,7 +196,7 @@\n \n content = decode_text(response.content)\n content_type = response.headers.get(\"Content-Type\")\n- if content_type != 'application/json':\n+ if content_type != 'application/json' and content_type != 'application/json; charset=utf-8':\n raise ConanException(\"%s\\n\\nResponse from remote is not json, but '%s'\"\n % (content, content_type))\n", "issue": "[bug] Conan client fails when server charset is specified.\nI'm experimenting with implementing a conan server. While testing said server I ran into problems because my server fully qualifies the \"Content-Type\" HTTP header to \"application/json; charset=utf-8\". The conan client though errors when talking to that server with this error: `Response from remote is not json, but 'application/json; charset=utf-8'.`. JSON content requires utf-8 encoding and charset, which makes the addition in the content-type of the charset optional by the server. But regardless of what the charset is, utf-8 or ascii, clients need to handle there being a charset being specified. Which the conan client doesn't appear to be doing.\r\n\r\n### Environment Details (include every applicable attribute)\r\n * Operating System+version: 5.11.14-1-MANJARO #1 SMP PREEMPT Wed Apr 14 08:25:29 UTC 2021 x86_64 GNU/Linux\r\n * Compiler+version: NA\r\n * Conan version: 1.35.1\r\n * Python version: 3.9.3\r\n\n[bug] Conan client fails when server charset is specified.\nI'm experimenting with implementing a conan server. While testing said server I ran into problems because my server fully qualifies the \"Content-Type\" HTTP header to \"application/json; charset=utf-8\". The conan client though errors when talking to that server with this error: `Response from remote is not json, but 'application/json; charset=utf-8'.`. JSON content requires utf-8 encoding and charset, which makes the addition in the content-type of the charset optional by the server. But regardless of what the charset is, utf-8 or ascii, clients need to handle there being a charset being specified. 
Which the conan client doesn't appear to be doing.\r\n\r\n### Environment Details (include every applicable attribute)\r\n * Operating System+version: 5.11.14-1-MANJARO #1 SMP PREEMPT Wed Apr 14 08:25:29 UTC 2021 x86_64 GNU/Linux\r\n * Compiler+version: NA\r\n * Conan version: 1.35.1\r\n * Python version: 3.9.3\r\n\n", "before_files": [{"content": "import json\n\nfrom requests.auth import AuthBase, HTTPBasicAuth\n\nfrom conans.client.rest import response_to_str\nfrom conans.errors import (EXCEPTION_CODE_MAPPING, ConanException,\n AuthenticationException, RecipeNotFoundException,\n PackageNotFoundException)\nfrom conans.model.ref import ConanFileReference\nfrom conans.util.files import decode_text\nfrom conans.util.log import logger\n\n\nclass JWTAuth(AuthBase):\n \"\"\"Attaches JWT Authentication to the given Request object.\"\"\"\n\n def __init__(self, token):\n self.token = token\n\n def __call__(self, request):\n if self.token:\n request.headers['Authorization'] = \"Bearer %s\" % str(self.token)\n return request\n\n\ndef get_exception_from_error(error_code):\n tmp = {v: k for k, v in EXCEPTION_CODE_MAPPING.items() # All except NotFound\n if k not in (RecipeNotFoundException, PackageNotFoundException)}\n if error_code in tmp:\n logger.debug(\"REST ERROR: %s\" % str(tmp[error_code]))\n return tmp[error_code]\n else:\n base_error = int(str(error_code)[0] + \"00\")\n logger.debug(\"REST ERROR: %s\" % str(base_error))\n try:\n return tmp[base_error]\n except KeyError:\n return None\n\n\ndef handle_return_deserializer(deserializer=None):\n \"\"\"Decorator for rest api methods.\n Map exceptions and http return codes and deserialize if needed.\n\n deserializer: Function for deserialize values\"\"\"\n\n def handle_return(method):\n def inner(*argc, **argv):\n ret = method(*argc, **argv)\n if ret.status_code != 200:\n ret.charset = \"utf-8\" # To be able to access ret.text (ret.content are bytes)\n text = ret.text if ret.status_code != 404 else \"404 Not found\"\n raise get_exception_from_error(ret.status_code)(text)\n return deserializer(ret.content) if deserializer else decode_text(ret.content)\n\n return inner\n\n return handle_return\n\n\nclass RestCommonMethods(object):\n\n def __init__(self, remote_url, token, custom_headers, output, requester, config, verify_ssl,\n artifacts_properties=None, matrix_params=False):\n self.token = token\n self.remote_url = remote_url\n self.custom_headers = custom_headers\n self._output = output\n self.requester = requester\n self._config = config\n self.verify_ssl = verify_ssl\n self._artifacts_properties = artifacts_properties\n self._matrix_params = matrix_params\n\n @property\n def auth(self):\n return JWTAuth(self.token)\n\n @staticmethod\n def _check_error_response(ret):\n if ret.status_code == 401:\n raise AuthenticationException(\"Wrong user or password\")\n # Cannot check content-type=text/html, conan server is doing it wrong\n if not ret.ok or \"html>\" in str(ret.content):\n raise ConanException(\"%s\\n\\nInvalid server response, check remote URL and \"\n \"try again\" % str(ret.content))\n\n def authenticate(self, user, password):\n \"\"\"Sends user + password to get:\n - A plain response with a regular token (not supported refresh in the remote) and None\n \"\"\"\n auth = HTTPBasicAuth(user, password)\n url = self.router.common_authenticate()\n logger.debug(\"REST: Authenticate to get access_token: %s\" % url)\n ret = self.requester.get(url, auth=auth, headers=self.custom_headers,\n verify=self.verify_ssl)\n\n self._check_error_response(ret)\n 
return decode_text(ret.content)\n\n def authenticate_oauth(self, user, password):\n \"\"\"Sends user + password to get:\n - A json with an access_token and a refresh token (if supported in the remote)\n Artifactory >= 6.13.X\n \"\"\"\n url = self.router.oauth_authenticate()\n auth = HTTPBasicAuth(user, password)\n headers = {}\n headers.update(self.custom_headers)\n headers[\"Content-type\"] = \"application/x-www-form-urlencoded\"\n logger.debug(\"REST: Authenticating with OAUTH: %s\" % url)\n ret = self.requester.post(url, auth=auth, headers=headers, verify=self.verify_ssl)\n self._check_error_response(ret)\n\n data = ret.json()\n access_token = data[\"access_token\"]\n refresh_token = data[\"refresh_token\"]\n logger.debug(\"REST: Obtained refresh and access tokens\")\n return access_token, refresh_token\n\n def refresh_token(self, token, refresh_token):\n \"\"\"Sends access_token and the refresh_token to get a pair of\n access_token and refresh token\n\n Artifactory >= 6.13.X\n \"\"\"\n url = self.router.oauth_authenticate()\n logger.debug(\"REST: Refreshing Token: %s\" % url)\n headers = {}\n headers.update(self.custom_headers)\n headers[\"Content-type\"] = \"application/x-www-form-urlencoded\"\n payload = {'access_token': token, 'refresh_token': refresh_token,\n 'grant_type': 'refresh_token'}\n ret = self.requester.post(url, headers=headers, verify=self.verify_ssl, data=payload)\n self._check_error_response(ret)\n\n data = ret.json()\n if \"access_token\" not in data:\n logger.debug(\"REST: unexpected data from server: {}\".format(data))\n raise ConanException(\"Error refreshing the token\")\n\n new_access_token = data[\"access_token\"]\n new_refresh_token = data[\"refresh_token\"]\n logger.debug(\"REST: Obtained new refresh and access tokens\")\n return new_access_token, new_refresh_token\n\n @handle_return_deserializer()\n def check_credentials(self):\n \"\"\"If token is not valid will raise AuthenticationException.\n User will be asked for new user/pass\"\"\"\n url = self.router.common_check_credentials()\n logger.debug(\"REST: Check credentials: %s\" % url)\n ret = self.requester.get(url, auth=self.auth, headers=self.custom_headers,\n verify=self.verify_ssl)\n return ret\n\n def server_capabilities(self, user=None, password=None):\n \"\"\"Get information about the server: status, version, type and capabilities\"\"\"\n url = self.router.ping()\n logger.debug(\"REST: ping: %s\" % url)\n if user and password:\n # This can happen in \"conan user\" cmd. 
Instead of empty token, use HttpBasic\n auth = HTTPBasicAuth(user, password)\n else:\n auth = self.auth\n ret = self.requester.get(url, auth=auth, headers=self.custom_headers, verify=self.verify_ssl)\n\n server_capabilities = ret.headers.get('X-Conan-Server-Capabilities', \"\")\n if not server_capabilities and not ret.ok:\n # Old Artifactory might return 401/403 without capabilities, we don't want\n # to cache them #5687, so raise the exception and force authentication\n raise get_exception_from_error(ret.status_code)(response_to_str(ret))\n\n return [cap.strip() for cap in server_capabilities.split(\",\") if cap]\n\n def get_json(self, url, data=None, headers=None):\n req_headers = self.custom_headers.copy()\n req_headers.update(headers or {})\n if data: # POST request\n req_headers.update({'Content-type': 'application/json',\n 'Accept': 'application/json'})\n logger.debug(\"REST: post: %s\" % url)\n response = self.requester.post(url, auth=self.auth, headers=req_headers,\n verify=self.verify_ssl,\n stream=True,\n data=json.dumps(data))\n else:\n logger.debug(\"REST: get: %s\" % url)\n response = self.requester.get(url, auth=self.auth, headers=req_headers,\n verify=self.verify_ssl,\n stream=True)\n\n if response.status_code != 200: # Error message is text\n response.charset = \"utf-8\" # To be able to access ret.text (ret.content are bytes)\n raise get_exception_from_error(response.status_code)(response_to_str(response))\n\n content = decode_text(response.content)\n content_type = response.headers.get(\"Content-Type\")\n if content_type != 'application/json':\n raise ConanException(\"%s\\n\\nResponse from remote is not json, but '%s'\"\n % (content, content_type))\n\n try: # This can fail, if some proxy returns 200 and an html message\n result = json.loads(content)\n except Exception:\n raise ConanException(\"Remote responded with broken json: %s\" % content)\n if not isinstance(result, dict):\n raise ConanException(\"Unexpected server response %s\" % result)\n return result\n\n def upload_recipe(self, ref, files_to_upload, deleted, retry, retry_wait):\n if files_to_upload:\n self._upload_recipe(ref, files_to_upload, retry, retry_wait)\n if deleted:\n self._remove_conanfile_files(ref, deleted)\n\n def get_recipe_snapshot(self, ref):\n # this method is used only for UPLOADING, then it requires the credentials\n # Check of credentials is done in the uploader\n url = self.router.recipe_snapshot(ref)\n snap = self._get_snapshot(url)\n return snap\n\n def get_package_snapshot(self, pref):\n # this method is also used to check the integrity of the package upstream\n # while installing, so check_credentials is done in uploader.\n url = self.router.package_snapshot(pref)\n snap = self._get_snapshot(url)\n return snap\n\n def upload_package(self, pref, files_to_upload, deleted, retry, retry_wait):\n if files_to_upload:\n self._upload_package(pref, files_to_upload, retry, retry_wait)\n if deleted:\n raise Exception(\"This shouldn't be happening, deleted files \"\n \"in local package present in remote: %s.\\n Please, report it at \"\n \"https://github.com/conan-io/conan/issues \" % str(deleted))\n\n def search(self, pattern=None, ignorecase=True):\n \"\"\"\n the_files: dict with relative_path: content\n \"\"\"\n url = self.router.search(pattern, ignorecase)\n response = self.get_json(url)[\"results\"]\n return [ConanFileReference.loads(reference) for reference in response]\n\n def search_packages(self, ref, query):\n \"\"\"Client is filtering by the query\"\"\"\n url = 
self.router.search_packages(ref, query)\n package_infos = self.get_json(url)\n return package_infos\n", "path": "conans/client/rest/rest_client_common.py"}]} | 3,938 | 138 |
gh_patches_debug_7527 | rasdani/github-patches | git_diff | mindsdb__mindsdb-748 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Check if the python version is at least 3.6 on server start.
On server start, check if the Python version is < 3.8.
If a lower version is detected, print this message:
```
MindsDB server requires Python >= 3.8 to run
Once you have Python 3.8 installed you can tun mindsdb as follows:
1. create and activate venv:
python3.8 -m venv venv
source venv/bin/activate
2. install MindsDB:
pip3 install mindsdb
3. Run MindsDB
python3.8 -m mindsdb
More instructions in https://docs.mindsdb.com
```
and exit
</issue>
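The requested guard amounts to comparing `sys.version_info` against a minimum version before the server does anything else. A minimal sketch using the 3.8 threshold and message quoted above (the fix that actually shipped may use a different threshold or wording):

```
import sys

if sys.version_info < (3, 8):
    print("MindsDB server requires Python >= 3.8 to run")
    print("More instructions in https://docs.mindsdb.com")
    sys.exit(1)
```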
<code>
[start of mindsdb/__main__.py]
1 import atexit
2 import traceback
3 import sys
4 import os
5 import time
6
7 from pkg_resources import get_distribution
8 import torch.multiprocessing as mp
9
10 from mindsdb.utilities.config import Config
11 from mindsdb.interfaces.native.mindsdb import MindsdbNative
12 from mindsdb.interfaces.custom.custom_models import CustomModels
13 from mindsdb.api.http.start import start as start_http
14 from mindsdb.api.mysql.start import start as start_mysql
15 from mindsdb.api.mongo.start import start as start_mongo
16 from mindsdb.utilities.fs import get_or_create_dir_struct, update_versions_file
17 from mindsdb.utilities.ps import is_port_in_use
18 from mindsdb.interfaces.database.database import DatabaseWrapper
19 from mindsdb.utilities.functions import args_parse
20
21
22 def close_api_gracefully(p_arr):
23 for p in p_arr:
24 sys.stdout.flush()
25 p.terminate()
26 p.join()
27 sys.stdout.flush()
28
29
30 if __name__ == '__main__':
31 mp.freeze_support()
32
33 args = args_parse()
34
35 config_path = args.config
36 if config_path is None:
37 config_dir, _ = get_or_create_dir_struct()
38 config_path = os.path.join(config_dir, 'config.json')
39
40 config = Config(config_path)
41
42 from mindsdb.__about__ import __version__ as mindsdb_version
43
44 if args.version:
45 print(f'MindsDB {mindsdb_version}')
46 sys.exit(0)
47
48 try:
49 lightwood_version = get_distribution('lightwood').version
50 except Exception:
51 from lightwood.__about__ import __version__ as lightwood_version
52
53 try:
54 mindsdb_native_version = get_distribution('mindsdb_native').version
55 except Exception:
56 from mindsdb_native.__about__ import __version__ as mindsdb_native_version
57
58 if args.verbose:
59 config['log']['level']['console'] = 'INFO'
60
61 print(f'Configuration file:\n {config_path}')
62 print(f"Storage path:\n {config.paths['root']}")
63
64 print('Versions:')
65 print(f' - lightwood {lightwood_version}')
66 print(f' - MindsDB_native {mindsdb_native_version}')
67 print(f' - MindsDB {mindsdb_version}')
68
69 os.environ['MINDSDB_STORAGE_PATH'] = config.paths['predictors']
70 if args.verbose is True:
71 os.environ['DEFAULT_LOG_LEVEL'] = 'INFO'
72 os.environ['LIGHTWOOD_LOG_LEVEL'] = 'INFO'
73 else:
74 os.environ['DEFAULT_LOG_LEVEL'] = 'ERROR'
75 os.environ['LIGHTWOOD_LOG_LEVEL'] = 'ERROR'
76
77 update_versions_file(
78 config,
79 {
80 'lightwood': lightwood_version,
81 'mindsdb_native': mindsdb_native_version,
82 'mindsdb': mindsdb_version,
83 'python': sys.version.replace('\n', '')
84 }
85 )
86
87 if args.api is None:
88 api_arr = ['http', 'mysql']
89 else:
90 api_arr = args.api.split(',')
91
92 api_arr = [{
93 'name': api,
94 'port': config['api'][api]['port'],
95 'started': False
96 } for api in api_arr]
97
98 for api in api_arr:
99 api_name = api['name']
100 if api_name not in config['api']:
101 print(f"Trying run '{api_name}' API, but is no config for this api.")
102 print(f"Please, fill config['api']['{api_name}']")
103 sys.exit(0)
104
105 start_functions = {
106 'http': start_http,
107 'mysql': start_mysql,
108 'mongodb': start_mongo
109 }
110
111 mdb = MindsdbNative(config)
112 cst = CustomModels(config)
113 # @TODO Maybe just use `get_model_data` directly here ? Seems like a useless abstraction
114 model_data_arr = [
115 {
116 'name': x['name'],
117 'predict': x['predict'],
118 'data_analysis': mdb.get_model_data(x['name'])['data_analysis_v2']
119 } for x in mdb.get_models()
120 ]
121
122 for m in model_data_arr:
123 if 'columns_to_ignore' in m['data_analysis']:
124 del m['data_analysis']['columns_to_ignore']
125 if 'train_std_dev' in m['data_analysis']:
126 del m['data_analysis']['train_std_dev']
127
128 model_data_arr.extend(cst.get_models())
129
130 dbw = DatabaseWrapper(config)
131 dbw.register_predictors(model_data_arr)
132
133 for broken_name in [name for name, connected in dbw.check_connections().items() if connected is False]:
134 print(f'Error failed to integrate with database aliased: {broken_name}')
135
136 p_arr = []
137 ctx = mp.get_context('spawn')
138
139 for api in api_arr:
140 api_name = api['name']
141 print(f'{api_name} API: starting...')
142 try:
143 p = ctx.Process(target=start_functions[api_name], args=(config_path, args.verbose))
144 p.start()
145 p_arr.append(p)
146 except Exception as e:
147 close_api_gracefully(p_arr)
148 print(f'Failed to start {api_name} API with exception {e}')
149 print(traceback.format_exc())
150 raise
151
152 atexit.register(close_api_gracefully, p_arr=p_arr)
153
154 timeout = 15
155 start_time = time.time()
156 all_started = False
157 while (time.time() - start_time) < timeout and all_started is False:
158 all_started = True
159 for i, api in enumerate(api_arr):
160 try:
161 in_use = api['started'] or is_port_in_use(api['port'])
162 except Exception:
163 # NOTE that hotfix for OSX: is_port_in_use will raise AccessDenied error if it runned not as sudo
164 in_use = True
165 if in_use and api['started'] != in_use:
166 api['started'] = in_use
167 print(f"{api['name']} API: started on {api['port']}")
168 all_started = all_started and in_use
169 time.sleep(0.5)
170
171 for p in p_arr:
172 p.join()
173
[end of mindsdb/__main__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mindsdb/__main__.py b/mindsdb/__main__.py
--- a/mindsdb/__main__.py
+++ b/mindsdb/__main__.py
@@ -28,6 +28,28 @@
if __name__ == '__main__':
+ version_error_msg = """
+MindsDB server requires Python >= 3.6 to run
+
+Once you have Python 3.6 installed you can tun mindsdb as follows:
+
+1. create and activate venv:
+python3.6 -m venv venv
+source venv/bin/activate
+
+2. install MindsDB:
+pip3 install mindsdb
+
+3. Run MindsDB
+python3.6 -m mindsdb
+
+More instructions in https://docs.mindsdb.com
+ """
+
+ if not (sys.version_info[0] >= 3 and sys.version_info[1] >= 6):
+ print(version_error_msg)
+ exit(1)
+
mp.freeze_support()
args = args_parse()
| {"golden_diff": "diff --git a/mindsdb/__main__.py b/mindsdb/__main__.py\n--- a/mindsdb/__main__.py\n+++ b/mindsdb/__main__.py\n@@ -28,6 +28,28 @@\n \n \n if __name__ == '__main__':\n+ version_error_msg = \"\"\"\n+MindsDB server requires Python >= 3.6 to run\n+\n+Once you have Python 3.6 installed you can tun mindsdb as follows:\n+\n+1. create and activate venv:\n+python3.6 -m venv venv\n+source venv/bin/activate\n+\n+2. install MindsDB:\n+pip3 install mindsdb\n+\n+3. Run MindsDB\n+python3.6 -m mindsdb\n+\n+More instructions in https://docs.mindsdb.com\n+ \"\"\"\n+\n+ if not (sys.version_info[0] >= 3 and sys.version_info[1] >= 6):\n+ print(version_error_msg)\n+ exit(1)\n+\n mp.freeze_support()\n \n args = args_parse()\n", "issue": "Check if the python version is at least 3.6 on server start.\nOn Server start check if Python version is < 3.8\r\n\r\nif lower version, print message:\r\n\r\n```\r\nMindsDB server requires Python >= 3.8 to run\r\n\r\nOnce you have Python 3.8 installed you can tun mindsdb as follows:\r\n\r\n1. create and activate venv:\r\n python3.8 -m venv venv\r\n source venv/bin/activate\r\n2. install MindsDB:\r\n pip3 install mindsdb\r\n3. Run MindsDB\r\n python3.8 -m mindsdb\r\n\r\nMore instructions in https://docs.mindsdb.com\r\n\r\n```\r\n\r\nand exit\n", "before_files": [{"content": "import atexit\nimport traceback\nimport sys\nimport os\nimport time\n\nfrom pkg_resources import get_distribution\nimport torch.multiprocessing as mp\n\nfrom mindsdb.utilities.config import Config\nfrom mindsdb.interfaces.native.mindsdb import MindsdbNative\nfrom mindsdb.interfaces.custom.custom_models import CustomModels\nfrom mindsdb.api.http.start import start as start_http\nfrom mindsdb.api.mysql.start import start as start_mysql\nfrom mindsdb.api.mongo.start import start as start_mongo\nfrom mindsdb.utilities.fs import get_or_create_dir_struct, update_versions_file\nfrom mindsdb.utilities.ps import is_port_in_use\nfrom mindsdb.interfaces.database.database import DatabaseWrapper\nfrom mindsdb.utilities.functions import args_parse\n\n\ndef close_api_gracefully(p_arr):\n for p in p_arr:\n sys.stdout.flush()\n p.terminate()\n p.join()\n sys.stdout.flush()\n\n\nif __name__ == '__main__':\n mp.freeze_support()\n\n args = args_parse()\n\n config_path = args.config\n if config_path is None:\n config_dir, _ = get_or_create_dir_struct()\n config_path = os.path.join(config_dir, 'config.json')\n\n config = Config(config_path)\n\n from mindsdb.__about__ import __version__ as mindsdb_version\n\n if args.version:\n print(f'MindsDB {mindsdb_version}')\n sys.exit(0)\n\n try:\n lightwood_version = get_distribution('lightwood').version\n except Exception:\n from lightwood.__about__ import __version__ as lightwood_version\n\n try:\n mindsdb_native_version = get_distribution('mindsdb_native').version\n except Exception:\n from mindsdb_native.__about__ import __version__ as mindsdb_native_version\n\n if args.verbose:\n config['log']['level']['console'] = 'INFO'\n\n print(f'Configuration file:\\n {config_path}')\n print(f\"Storage path:\\n {config.paths['root']}\")\n\n print('Versions:')\n print(f' - lightwood {lightwood_version}')\n print(f' - MindsDB_native {mindsdb_native_version}')\n print(f' - MindsDB {mindsdb_version}')\n\n os.environ['MINDSDB_STORAGE_PATH'] = config.paths['predictors']\n if args.verbose is True:\n os.environ['DEFAULT_LOG_LEVEL'] = 'INFO'\n os.environ['LIGHTWOOD_LOG_LEVEL'] = 'INFO'\n else:\n os.environ['DEFAULT_LOG_LEVEL'] = 'ERROR'\n os.environ['LIGHTWOOD_LOG_LEVEL'] = 'ERROR'\n\n 
update_versions_file(\n config,\n {\n 'lightwood': lightwood_version,\n 'mindsdb_native': mindsdb_native_version,\n 'mindsdb': mindsdb_version,\n 'python': sys.version.replace('\\n', '')\n }\n )\n\n if args.api is None:\n api_arr = ['http', 'mysql']\n else:\n api_arr = args.api.split(',')\n\n api_arr = [{\n 'name': api,\n 'port': config['api'][api]['port'],\n 'started': False\n } for api in api_arr]\n\n for api in api_arr:\n api_name = api['name']\n if api_name not in config['api']:\n print(f\"Trying run '{api_name}' API, but is no config for this api.\")\n print(f\"Please, fill config['api']['{api_name}']\")\n sys.exit(0)\n\n start_functions = {\n 'http': start_http,\n 'mysql': start_mysql,\n 'mongodb': start_mongo\n }\n\n mdb = MindsdbNative(config)\n cst = CustomModels(config)\n # @TODO Maybe just use `get_model_data` directly here ? Seems like a useless abstraction\n model_data_arr = [\n {\n 'name': x['name'],\n 'predict': x['predict'],\n 'data_analysis': mdb.get_model_data(x['name'])['data_analysis_v2']\n } for x in mdb.get_models()\n ]\n\n for m in model_data_arr:\n if 'columns_to_ignore' in m['data_analysis']:\n del m['data_analysis']['columns_to_ignore']\n if 'train_std_dev' in m['data_analysis']:\n del m['data_analysis']['train_std_dev']\n\n model_data_arr.extend(cst.get_models())\n\n dbw = DatabaseWrapper(config)\n dbw.register_predictors(model_data_arr)\n\n for broken_name in [name for name, connected in dbw.check_connections().items() if connected is False]:\n print(f'Error failed to integrate with database aliased: {broken_name}')\n\n p_arr = []\n ctx = mp.get_context('spawn')\n\n for api in api_arr:\n api_name = api['name']\n print(f'{api_name} API: starting...')\n try:\n p = ctx.Process(target=start_functions[api_name], args=(config_path, args.verbose))\n p.start()\n p_arr.append(p)\n except Exception as e:\n close_api_gracefully(p_arr)\n print(f'Failed to start {api_name} API with exception {e}')\n print(traceback.format_exc())\n raise\n\n atexit.register(close_api_gracefully, p_arr=p_arr)\n\n timeout = 15\n start_time = time.time()\n all_started = False\n while (time.time() - start_time) < timeout and all_started is False:\n all_started = True\n for i, api in enumerate(api_arr):\n try:\n in_use = api['started'] or is_port_in_use(api['port'])\n except Exception:\n # NOTE that hotfix for OSX: is_port_in_use will raise AccessDenied error if it runned not as sudo\n in_use = True\n if in_use and api['started'] != in_use:\n api['started'] = in_use\n print(f\"{api['name']} API: started on {api['port']}\")\n all_started = all_started and in_use\n time.sleep(0.5)\n\n for p in p_arr:\n p.join()\n", "path": "mindsdb/__main__.py"}]} | 2,396 | 234 |
gh_patches_debug_15825 | rasdani/github-patches | git_diff | zulip__zulip-13771 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Creation of temporary files in requirements/ can cause provision to fail
An example that triggered this for me was as follows:
* `cd requirements/`
* edit file using editor which creates temporary file in this location (eg vim, depending on configuration)
* `tools/provision`
* provision fails with an error like
```
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xcd in position 17: invalid continuation byte
```
This appears to be due to the venv management script not being able to handle the unexpected file produced by eg. vim.
This is not a major issue, but is a bit of a strange issue to debug if you are not expecting it or are new, and potentially could be easy to fix.
</issue>
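The failure happens because the venv cache script reads and hashes every file under `requirements/`, including editor swap/temporary files that are not valid UTF-8. A small sketch of the obvious mitigation, assuming real requirements files always end in `.txt`:

```
import glob
import os

reqs_dir = "requirements"
# Only consider real requirements files; editor swap/temp files are skipped.
for requirements_file in sorted(glob.glob(os.path.join(reqs_dir, "*.txt"))):
    print(requirements_file)
```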
<code>
[start of scripts/lib/clean_venv_cache.py]
1 #!/usr/bin/env python3
2 import argparse
3 import os
4 import sys
5
6 from typing import Set
7
8 ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
9 sys.path.append(ZULIP_PATH)
10 from scripts.lib.hash_reqs import expand_reqs, hash_deps
11 from scripts.lib.zulip_tools import \
12 get_environment, get_recent_deployments, parse_cache_script_args, \
13 purge_unused_caches
14
15 ENV = get_environment()
16 VENV_CACHE_DIR = '/srv/zulip-venv-cache'
17 if ENV == "travis":
18 VENV_CACHE_DIR = os.path.join(os.environ["HOME"], "zulip-venv-cache")
19
20 def get_caches_in_use(threshold_days):
21 # type: (int) -> Set[str]
22 setups_to_check = set([ZULIP_PATH, ])
23 caches_in_use = set()
24
25 def add_current_venv_cache(venv_name: str) -> None:
26 CACHE_SYMLINK = os.path.join(os.path.dirname(ZULIP_PATH), venv_name)
27 CURRENT_CACHE = os.path.dirname(os.path.realpath(CACHE_SYMLINK))
28 caches_in_use.add(CURRENT_CACHE)
29
30 if ENV == "prod":
31 setups_to_check |= get_recent_deployments(threshold_days)
32 if ENV == "dev":
33 add_current_venv_cache("zulip-py3-venv")
34 add_current_venv_cache("zulip-thumbor-venv")
35
36 for path in setups_to_check:
37 reqs_dir = os.path.join(path, "requirements")
38 # If the target directory doesn't contain a requirements
39 # directory, skip it to avoid throwing an exception trying to
40 # list its requirements subdirectory.
41 if not os.path.exists(reqs_dir):
42 continue
43 for filename in os.listdir(reqs_dir):
44 requirements_file = os.path.join(reqs_dir, filename)
45 deps = expand_reqs(requirements_file)
46 hash_val = hash_deps(deps)
47 caches_in_use.add(os.path.join(VENV_CACHE_DIR, hash_val))
48
49 return caches_in_use
50
51 def main(args: argparse.Namespace) -> None:
52 caches_in_use = get_caches_in_use(args.threshold_days)
53 purge_unused_caches(
54 VENV_CACHE_DIR, caches_in_use, "venv cache", args)
55
56 if __name__ == "__main__":
57 args = parse_cache_script_args("This script cleans unused zulip venv caches.")
58 main(args)
59
[end of scripts/lib/clean_venv_cache.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/lib/clean_venv_cache.py b/scripts/lib/clean_venv_cache.py
--- a/scripts/lib/clean_venv_cache.py
+++ b/scripts/lib/clean_venv_cache.py
@@ -1,5 +1,6 @@
#!/usr/bin/env python3
import argparse
+import glob
import os
import sys
@@ -40,8 +41,8 @@
# list its requirements subdirectory.
if not os.path.exists(reqs_dir):
continue
- for filename in os.listdir(reqs_dir):
- requirements_file = os.path.join(reqs_dir, filename)
+ requirements_files = glob.glob(os.path.join(reqs_dir, "*.txt"))
+ for requirements_file in requirements_files:
deps = expand_reqs(requirements_file)
hash_val = hash_deps(deps)
caches_in_use.add(os.path.join(VENV_CACHE_DIR, hash_val))
| {"golden_diff": "diff --git a/scripts/lib/clean_venv_cache.py b/scripts/lib/clean_venv_cache.py\n--- a/scripts/lib/clean_venv_cache.py\n+++ b/scripts/lib/clean_venv_cache.py\n@@ -1,5 +1,6 @@\n #!/usr/bin/env python3\n import argparse\n+import glob\n import os\n import sys\n \n@@ -40,8 +41,8 @@\n # list its requirements subdirectory.\n if not os.path.exists(reqs_dir):\n continue\n- for filename in os.listdir(reqs_dir):\n- requirements_file = os.path.join(reqs_dir, filename)\n+ requirements_files = glob.glob(os.path.join(reqs_dir, \"*.txt\"))\n+ for requirements_file in requirements_files:\n deps = expand_reqs(requirements_file)\n hash_val = hash_deps(deps)\n caches_in_use.add(os.path.join(VENV_CACHE_DIR, hash_val))\n", "issue": "Creation of temporary files in requirements/ can cause provision to fail\nAn example to trigger this for me is was as follows:\r\n* `cd requirements/`\r\n* edit file using editor which creates temporary file in this location (eg vim, depending on configuration)\r\n* `tools/provision`\r\n* provision fails with an error like\r\n```\r\nUnicodeDecodeError: 'utf-8' codec can't decode byte 0xcd in position 17: invalid continuation byte\r\n```\r\n\r\nThis appears to be due to the venv management script not being able to handle the unexpected file produced by eg. vim.\r\n\r\nThis is not a major issue, but is a bit of a strange issue to debug if you are not expecting it or are new, and potentially could be easy to fix.\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport argparse\nimport os\nimport sys\n\nfrom typing import Set\n\nZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nsys.path.append(ZULIP_PATH)\nfrom scripts.lib.hash_reqs import expand_reqs, hash_deps\nfrom scripts.lib.zulip_tools import \\\n get_environment, get_recent_deployments, parse_cache_script_args, \\\n purge_unused_caches\n\nENV = get_environment()\nVENV_CACHE_DIR = '/srv/zulip-venv-cache'\nif ENV == \"travis\":\n VENV_CACHE_DIR = os.path.join(os.environ[\"HOME\"], \"zulip-venv-cache\")\n\ndef get_caches_in_use(threshold_days):\n # type: (int) -> Set[str]\n setups_to_check = set([ZULIP_PATH, ])\n caches_in_use = set()\n\n def add_current_venv_cache(venv_name: str) -> None:\n CACHE_SYMLINK = os.path.join(os.path.dirname(ZULIP_PATH), venv_name)\n CURRENT_CACHE = os.path.dirname(os.path.realpath(CACHE_SYMLINK))\n caches_in_use.add(CURRENT_CACHE)\n\n if ENV == \"prod\":\n setups_to_check |= get_recent_deployments(threshold_days)\n if ENV == \"dev\":\n add_current_venv_cache(\"zulip-py3-venv\")\n add_current_venv_cache(\"zulip-thumbor-venv\")\n\n for path in setups_to_check:\n reqs_dir = os.path.join(path, \"requirements\")\n # If the target directory doesn't contain a requirements\n # directory, skip it to avoid throwing an exception trying to\n # list its requirements subdirectory.\n if not os.path.exists(reqs_dir):\n continue\n for filename in os.listdir(reqs_dir):\n requirements_file = os.path.join(reqs_dir, filename)\n deps = expand_reqs(requirements_file)\n hash_val = hash_deps(deps)\n caches_in_use.add(os.path.join(VENV_CACHE_DIR, hash_val))\n\n return caches_in_use\n\ndef main(args: argparse.Namespace) -> None:\n caches_in_use = get_caches_in_use(args.threshold_days)\n purge_unused_caches(\n VENV_CACHE_DIR, caches_in_use, \"venv cache\", args)\n\nif __name__ == \"__main__\":\n args = parse_cache_script_args(\"This script cleans unused zulip venv caches.\")\n main(args)\n", "path": "scripts/lib/clean_venv_cache.py"}]} | 1,340 | 198 |
gh_patches_debug_37464 | rasdani/github-patches | git_diff | hydroshare__hydroshare-5088 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
track users' full email domain
**Describe the feature you'd like and what it will do**
In HS v2.5.4, we don't track full user email domain in our metrics.
**Why is this feature important?**
We need more insight into how HS' ecosystem of tools is being used. This information should drive our continued development on existing tools and our consideration of additions for future use.
</issue>
<code>
[start of hs_tracking/middleware.py]
1 from django.utils.deprecation import MiddlewareMixin
2
3 from .models import Session
4 from . import utils
5 import re
6
7 RESOURCE_RE = re.compile('resource/([0-9a-f]{32})/') # parser for resource id
8 BAG_RE = re.compile('bags/([0-9a-f]{32})\.zip') # parser for resource id # noqa
9 LANDING_RE = re.compile('resource/([0-9a-f]{32})/$') # reference to resource home page
10 REST_RE = re.compile('/hsapi/') # reference to REST or internal
11 INTERNAL_RE = re.compile('/hsapi/_internal/') # reference to an internal page
12
13
14 def get_resource_id_from_url(path):
15 """ read a resource id from a URL """
16 m = RESOURCE_RE.search(path)
17 if m and m.group(1):
18 return m.group(1)
19 m = BAG_RE.search(path)
20 if m and m.group(1):
21 return m.group(1)
22 return None
23
24
25 def get_rest_from_url(path):
26 """ determine whether a URL is a REST call or not
27
28 This should always return boolean, not search result.
29 """
30 if REST_RE.search(path):
31 if INTERNAL_RE.search(path):
32 return False
33 else:
34 return True
35 else:
36 return False
37
38
39 def get_landing_from_url(path):
40 """ determine whether a URL is a landing page.
41
42 This should always return boolean, not search result.
43 """
44 if LANDING_RE.search(path):
45 return True
46 else:
47 return False
48
49
50 class Tracking(MiddlewareMixin):
51 """The default tracking middleware logs all successful responses as a 'visit' variable with
52 the URL path as its value."""
53
54 def process_response(self, request, response):
55
56 # filter out heartbeat messages
57 if request.path.startswith('/heartbeat/'):
58 return response
59
60 # filter out web crawlers
61 is_human = getattr(request, 'is_human', False)
62 if not is_human:
63 return response
64
65 # filter out everything that is not an OK response
66 if response.status_code != 200:
67 return response
68
69 if not hasattr(request, 'user'):
70 return response
71
72 # get user info that will be recorded in the visit log
73 session = Session.objects.for_request(request)
74 usertype = utils.get_user_type(session)
75 emaildomain = utils.get_user_email_domain(session)
76 ip = utils.get_client_ip(request)
77
78 # build the message string (key:value pairs)
79 msg = '|'.join([str(item) for item in
80 ['user_ip=%s' % ip,
81 'http_method=%s' % request.method,
82 'http_code=%s' % response.status_code,
83 'user_type=%s' % usertype,
84 'user_email_domain=%s' % emaildomain,
85 'request_url=%s' % request.path]])
86
87 resource_id = get_resource_id_from_url(request.path)
88 rest = get_rest_from_url(request.path)
89 landing = get_landing_from_url(request.path)
90
91 # save the activity in the database
92 session.record('visit', value=msg, resource_id=resource_id,
93 landing=landing, rest=rest)
94
95 return response
96
[end of hs_tracking/middleware.py]
[start of hs_tracking/utils.py]
1 import robot_detection
2 from ipware.ip import get_ip
3 from hs_tools_resource.models import RequestUrlBase, RequestUrlBaseAggregation, RequestUrlBaseFile
4 from urllib.parse import urlparse
5
6
7 def get_client_ip(request):
8 return get_ip(request)
9
10
11 def get_user_type(session):
12 try:
13 user = session.visitor.user
14 usertype = user.userprofile.user_type
15 except AttributeError:
16 usertype = None
17 return usertype
18
19
20 def get_user_email_domain(session):
21 try:
22 user = session.visitor.user
23 emaildomain = user.email.split('@')[-1]
24 shortdomain = '.'.join(emaildomain.split('.')[1:])
25 except AttributeError:
26 shortdomain = None
27 return shortdomain
28
29
30 def is_human(user_agent):
31 if robot_detection.is_robot(user_agent):
32 return False
33 return True
34
35
36 def get_std_log_fields(request, session=None):
37 """ returns a standard set of metadata that to each receiver function.
38 This ensures that all activities are reporting a consistent set of metrics
39 """
40 user_type = None
41 user_email = None
42 if session is not None:
43 user_type = get_user_type(session)
44 user_email = get_user_email_domain(session)
45
46 return {
47 'user_ip': get_client_ip(request),
48 'user_type': user_type,
49 'user_email_domain': user_email,
50 }
51
52
53 def authentic_redirect_url(url):
54 """ Validates a url scheme and netloc is in an existing web app
55 :param url: String of a url
56 :return: Boolean, True if the url exists in a web app
57 """
58 if not url:
59 return False
60 u = urlparse(url)
61 url_base = "{}://{}".format(u.scheme, u.netloc)
62 return RequestUrlBase.objects.filter(value__startswith=url_base).exists() \
63 or RequestUrlBaseAggregation.objects.filter(value__startswith=url_base).exists() \
64 or RequestUrlBaseFile.objects.filter(value__startswith=url_base).exists()
65
[end of hs_tracking/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hs_tracking/middleware.py b/hs_tracking/middleware.py
--- a/hs_tracking/middleware.py
+++ b/hs_tracking/middleware.py
@@ -72,7 +72,7 @@
# get user info that will be recorded in the visit log
session = Session.objects.for_request(request)
usertype = utils.get_user_type(session)
- emaildomain = utils.get_user_email_domain(session)
+ email_tld = utils.get_user_email_tld(session)
ip = utils.get_client_ip(request)
# build the message string (key:value pairs)
@@ -81,7 +81,7 @@
'http_method=%s' % request.method,
'http_code=%s' % response.status_code,
'user_type=%s' % usertype,
- 'user_email_domain=%s' % emaildomain,
+ 'user_email_domain=%s' % email_tld,
'request_url=%s' % request.path]])
resource_id = get_resource_id_from_url(request.path)
diff --git a/hs_tracking/utils.py b/hs_tracking/utils.py
--- a/hs_tracking/utils.py
+++ b/hs_tracking/utils.py
@@ -21,10 +21,20 @@
try:
user = session.visitor.user
emaildomain = user.email.split('@')[-1]
- shortdomain = '.'.join(emaildomain.split('.')[1:])
except AttributeError:
- shortdomain = None
- return shortdomain
+ emaildomain = None
+ return emaildomain
+
+
+def get_user_email_tld(session, emaildomain=None):
+ try:
+ if not emaildomain:
+ emaildomain = get_user_email_domain(session)
+ if emaildomain:
+ shortdomain = '.'.join(emaildomain.split('.')[1:])
+ return shortdomain
+ except AttributeError:
+ return None
def is_human(user_agent):
@@ -38,15 +48,18 @@
This ensures that all activities are reporting a consistent set of metrics
"""
user_type = None
- user_email = None
+ user_email_tld = None
+ full_domain = None
if session is not None:
user_type = get_user_type(session)
- user_email = get_user_email_domain(session)
+ full_domain = get_user_email_domain(session)
+ user_email_tld = get_user_email_tld(session, full_domain)
return {
'user_ip': get_client_ip(request),
'user_type': user_type,
- 'user_email_domain': user_email,
+ 'user_email_domain': user_email_tld,
+ 'user_email_domain_full': full_domain
}
| {"golden_diff": "diff --git a/hs_tracking/middleware.py b/hs_tracking/middleware.py\n--- a/hs_tracking/middleware.py\n+++ b/hs_tracking/middleware.py\n@@ -72,7 +72,7 @@\n # get user info that will be recorded in the visit log\n session = Session.objects.for_request(request)\n usertype = utils.get_user_type(session)\n- emaildomain = utils.get_user_email_domain(session)\n+ email_tld = utils.get_user_email_tld(session)\n ip = utils.get_client_ip(request)\n \n # build the message string (key:value pairs)\n@@ -81,7 +81,7 @@\n 'http_method=%s' % request.method,\n 'http_code=%s' % response.status_code,\n 'user_type=%s' % usertype,\n- 'user_email_domain=%s' % emaildomain,\n+ 'user_email_domain=%s' % email_tld,\n 'request_url=%s' % request.path]])\n \n resource_id = get_resource_id_from_url(request.path)\ndiff --git a/hs_tracking/utils.py b/hs_tracking/utils.py\n--- a/hs_tracking/utils.py\n+++ b/hs_tracking/utils.py\n@@ -21,10 +21,20 @@\n try:\n user = session.visitor.user\n emaildomain = user.email.split('@')[-1]\n- shortdomain = '.'.join(emaildomain.split('.')[1:])\n except AttributeError:\n- shortdomain = None\n- return shortdomain\n+ emaildomain = None\n+ return emaildomain\n+\n+\n+def get_user_email_tld(session, emaildomain=None):\n+ try:\n+ if not emaildomain:\n+ emaildomain = get_user_email_domain(session)\n+ if emaildomain:\n+ shortdomain = '.'.join(emaildomain.split('.')[1:])\n+ return shortdomain\n+ except AttributeError:\n+ return None\n \n \n def is_human(user_agent):\n@@ -38,15 +48,18 @@\n This ensures that all activities are reporting a consistent set of metrics\n \"\"\"\n user_type = None\n- user_email = None\n+ user_email_tld = None\n+ full_domain = None\n if session is not None:\n user_type = get_user_type(session)\n- user_email = get_user_email_domain(session)\n+ full_domain = get_user_email_domain(session)\n+ user_email_tld = get_user_email_tld(session, full_domain)\n \n return {\n 'user_ip': get_client_ip(request),\n 'user_type': user_type,\n- 'user_email_domain': user_email,\n+ 'user_email_domain': user_email_tld,\n+ 'user_email_domain_full': full_domain\n }\n", "issue": "track users' full email domain\n**Describe the feature you'd like and what it will do**\r\nIn HS v2.5.4, we don't track full user email domain in our metrics.\r\n\r\n\r\n**Why is this feature important?**\r\nWe need more insight into how HS' ecosystem of tools are being used. This information should drive our continued development on existing tools and our consideration of additions for future use.\r\n\n", "before_files": [{"content": "from django.utils.deprecation import MiddlewareMixin\n\nfrom .models import Session\nfrom . 
import utils\nimport re\n\nRESOURCE_RE = re.compile('resource/([0-9a-f]{32})/') # parser for resource id\nBAG_RE = re.compile('bags/([0-9a-f]{32})\\.zip') # parser for resource id # noqa\nLANDING_RE = re.compile('resource/([0-9a-f]{32})/$') # reference to resource home page\nREST_RE = re.compile('/hsapi/') # reference to REST or internal\nINTERNAL_RE = re.compile('/hsapi/_internal/') # reference to an internal page\n\n\ndef get_resource_id_from_url(path):\n \"\"\" read a resource id from a URL \"\"\"\n m = RESOURCE_RE.search(path)\n if m and m.group(1):\n return m.group(1)\n m = BAG_RE.search(path)\n if m and m.group(1):\n return m.group(1)\n return None\n\n\ndef get_rest_from_url(path):\n \"\"\" determine whether a URL is a REST call or not\n\n This should always return boolean, not search result.\n \"\"\"\n if REST_RE.search(path):\n if INTERNAL_RE.search(path):\n return False\n else:\n return True\n else:\n return False\n\n\ndef get_landing_from_url(path):\n \"\"\" determine whether a URL is a landing page.\n\n This should always return boolean, not search result.\n \"\"\"\n if LANDING_RE.search(path):\n return True\n else:\n return False\n\n\nclass Tracking(MiddlewareMixin):\n \"\"\"The default tracking middleware logs all successful responses as a 'visit' variable with\n the URL path as its value.\"\"\"\n\n def process_response(self, request, response):\n\n # filter out heartbeat messages\n if request.path.startswith('/heartbeat/'):\n return response\n\n # filter out web crawlers\n is_human = getattr(request, 'is_human', False)\n if not is_human:\n return response\n\n # filter out everything that is not an OK response\n if response.status_code != 200:\n return response\n\n if not hasattr(request, 'user'):\n return response\n\n # get user info that will be recorded in the visit log\n session = Session.objects.for_request(request)\n usertype = utils.get_user_type(session)\n emaildomain = utils.get_user_email_domain(session)\n ip = utils.get_client_ip(request)\n\n # build the message string (key:value pairs)\n msg = '|'.join([str(item) for item in\n ['user_ip=%s' % ip,\n 'http_method=%s' % request.method,\n 'http_code=%s' % response.status_code,\n 'user_type=%s' % usertype,\n 'user_email_domain=%s' % emaildomain,\n 'request_url=%s' % request.path]])\n\n resource_id = get_resource_id_from_url(request.path)\n rest = get_rest_from_url(request.path)\n landing = get_landing_from_url(request.path)\n\n # save the activity in the database\n session.record('visit', value=msg, resource_id=resource_id,\n landing=landing, rest=rest)\n\n return response\n", "path": "hs_tracking/middleware.py"}, {"content": "import robot_detection\nfrom ipware.ip import get_ip\nfrom hs_tools_resource.models import RequestUrlBase, RequestUrlBaseAggregation, RequestUrlBaseFile\nfrom urllib.parse import urlparse\n\n\ndef get_client_ip(request):\n return get_ip(request)\n\n\ndef get_user_type(session):\n try:\n user = session.visitor.user\n usertype = user.userprofile.user_type\n except AttributeError:\n usertype = None\n return usertype\n\n\ndef get_user_email_domain(session):\n try:\n user = session.visitor.user\n emaildomain = user.email.split('@')[-1]\n shortdomain = '.'.join(emaildomain.split('.')[1:])\n except AttributeError:\n shortdomain = None\n return shortdomain\n\n\ndef is_human(user_agent):\n if robot_detection.is_robot(user_agent):\n return False\n return True\n\n\ndef get_std_log_fields(request, session=None):\n \"\"\" returns a standard set of metadata that to each receiver function.\n This ensures that 
all activities are reporting a consistent set of metrics\n \"\"\"\n user_type = None\n user_email = None\n if session is not None:\n user_type = get_user_type(session)\n user_email = get_user_email_domain(session)\n\n return {\n 'user_ip': get_client_ip(request),\n 'user_type': user_type,\n 'user_email_domain': user_email,\n }\n\n\ndef authentic_redirect_url(url):\n \"\"\" Validates a url scheme and netloc is in an existing web app\n :param url: String of a url\n :return: Boolean, True if the url exists in a web app\n \"\"\"\n if not url:\n return False\n u = urlparse(url)\n url_base = \"{}://{}\".format(u.scheme, u.netloc)\n return RequestUrlBase.objects.filter(value__startswith=url_base).exists() \\\n or RequestUrlBaseAggregation.objects.filter(value__startswith=url_base).exists() \\\n or RequestUrlBaseFile.objects.filter(value__startswith=url_base).exists()\n", "path": "hs_tracking/utils.py"}]} | 2,069 | 589 |
gh_patches_debug_36736 | rasdani/github-patches | git_diff | Textualize__textual-2034 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Testing Using Pilot - `pilot.click()` Doesn't Have Effect On `Static` Widget
Hi,
I am trying to write tests for the example calculator app present in the repo (at the same time, expanding it to add a few more operators). This is my `CalculatorApp` class:
```python
class CalculatorApp(App):
CSS_PATH = "calculator.css"
viewport = var("0")
show_ac = var(True)
left = var(Decimal("0"))
right = var(Decimal("0"))
value = var("")
operator = var("plus")
def watch_viewport(self, value: str) -> None:
self.query_one("#viewport", Static).update(value)
def compute_show_ac(self) -> bool:
return self.value in ("", "0") and self.viewport == "0"
def watch_show_ac(self, show_ac: bool) -> None:
self.query_one("#ac").display = show_ac
self.query_one("#c").display = not show_ac
def compose(self) -> ComposeResult:
with Container(id="calculator"):
yield Static(id="viewport")
yield Button("AC", id="ac", variant="primary")
yield Button("C", id="c", variant="primary")
yield Button("+/-", id="negation", variant="primary")
yield Button("%", id="percent", variant="primary")
yield Button("sin(x)", id="sine", variant="warning")
yield Button("cos(x)", id="cosine", variant="warning")
yield Button("7", id="number-7", variant="primary")
yield Button("8", id="number-8", variant="primary")
yield Button("9", id="number-9", variant="primary")
yield Button("+", id="plus", variant="warning")
yield Button("x^y", id="exponent", variant="warning")
yield Button("4", id="number-4", variant="primary")
yield Button("5", id="number-5", variant="primary")
yield Button("6", id="number-6", variant="primary")
yield Button("-", id="minus", variant="warning")
yield Button("ln(x)", id="logarithm", variant="warning")
yield Button("1", id="number-1", variant="primary")
yield Button("2", id="number-2", variant="primary")
yield Button("3", id="number-3", variant="primary")
yield Button("*", id="multiply", variant="warning")
yield Button("x!", id="factorial", variant="warning")
yield Button("0", id="number-0", variant="primary")
yield Button(".", id="point", variant="primary")
yield Button("÷", id="divide", variant="warning")
yield Button("=", id="equals", variant="warning")
def on_button_pressed(self, event: Button.Pressed) -> None:
button_id = event.button.id
assert button_id is not None
if button_id.startswith("number-"):
number = button_id.split("-")[-1]
self.viewport = self.value = self.value.lstrip("0") + number
```
I wrote the following test to check that clicking the number buttons results in the calculator's display (`Static(id="viewport")`) accumulating digits to make a number:
```python
async def test_number_buttons():
async with CalculatorApp().run_test() as pilot:
app = pilot.app
await pilot.click("#number-1")
display_content = app.query_one("#viewport").render()
assert str(display_content) == "1"
await pilot.click("#number-2")
display_content = app.query_one("#viewport").render()
assert str(display_content) == "12"
await pilot.click("#number-3")
display_content = app.query_one("#viewport").render()
assert str(display_content) == "123"
```
While the GUI gets updated correctly on clicking the buttons, the test always fails since `app.query_one("#viewport").render()` always returns `"0"`. I've also tried replacing `app.query_one("#viewport").render()` with `app.query_one("#viewport", Static).render()` but that hasn't helped either.
Is this supposed to happen?
# Textual Diagnostics
## Versions
| Name | Value |
|---------|--------|
| Textual | 0.14.0 |
| Rich | 13.3.2 |
## Python
| Name | Value |
|----------------|-----------------------------------------------|
| Version | 3.10.9 |
| Implementation | CPython |
| Compiler | GCC 11.2.0 |
| Executable | /home/malay_agr/anaconda3/envs/spe/bin/python |
## Operating System
| Name | Value |
|---------|---------------------------------------------|
| System | Linux |
| Release | 5.15.0-52-generic |
| Version | #58-Ubuntu SMP Thu Oct 13 08:03:55 UTC 2022 |
## Terminal
| Name | Value |
|----------------------|-----------------|
| Terminal Application | vscode (1.76.1) |
| TERM | xterm-256color |
| COLORTERM | truecolor |
| FORCE_COLOR | *Not set* |
| NO_COLOR | *Not set* |
## Rich Console options
| Name | Value |
|----------------|----------------------|
| size | width=197, height=19 |
| legacy_windows | False |
| min_width | 1 |
| max_width | 197 |
| is_terminal | True |
| encoding | utf-8 |
| max_height | 19 |
| justify | None |
| overflow | None |
| no_wrap | False |
| highlight | None |
| markup | None |
| height | None |
</issue>
<code>
[start of src/textual/pilot.py]
1 from __future__ import annotations
2
3 import asyncio
4 from typing import Any, Generic
5
6 import rich.repr
7
8 from ._wait import wait_for_idle
9 from .app import App, ReturnType
10 from .css.query import QueryType
11 from .events import Click, MouseDown, MouseMove, MouseUp
12 from .geometry import Offset
13 from .widget import Widget
14
15
16 def _get_mouse_message_arguments(
17 target: Widget, offset: Offset = Offset(), button: int = 0
18 ) -> dict[str, Any]:
19 """Get the arguments to pass into mouse messages for the click and hover methods."""
20 x, y = offset
21 click_x, click_y, _, _ = target.region.translate(offset)
22 message_arguments = {
23 "x": x,
24 "y": y,
25 "delta_x": 0,
26 "delta_y": 0,
27 "button": button,
28 "shift": False,
29 "meta": False,
30 "ctrl": False,
31 "screen_x": click_x,
32 "screen_y": click_y,
33 }
34 return message_arguments
35
36
37 @rich.repr.auto(angular=True)
38 class Pilot(Generic[ReturnType]):
39 """Pilot object to drive an app."""
40
41 def __init__(self, app: App[ReturnType]) -> None:
42 self._app = app
43
44 def __rich_repr__(self) -> rich.repr.Result:
45 yield "app", self._app
46
47 @property
48 def app(self) -> App[ReturnType]:
49 """App: A reference to the application."""
50 return self._app
51
52 async def press(self, *keys: str) -> None:
53 """Simulate key-presses.
54
55 Args:
56 *keys: Keys to press.
57
58 """
59 if keys:
60 await self._app._press_keys(keys)
61
62 async def click(
63 self, selector: QueryType | None = None, offset: Offset = Offset()
64 ) -> None:
65 """Simulate clicking with the mouse.
66
67 Args:
68 selector: The widget that should be clicked. If None, then the click
69 will occur relative to the screen. Note that this simply causes
70 a click to occur at the location of the widget. If the widget is
71 currently hidden or obscured by another widget, then the click may
72 not land on it.
73 offset: The offset to click within the selected widget.
74 """
75 app = self.app
76 screen = app.screen
77 if selector is not None:
78 target_widget = screen.query_one(selector)
79 else:
80 target_widget = screen
81
82 message_arguments = _get_mouse_message_arguments(
83 target_widget, offset, button=1
84 )
85 app.post_message(MouseDown(**message_arguments))
86 app.post_message(MouseUp(**message_arguments))
87 app.post_message(Click(**message_arguments))
88 await self.pause()
89
90 async def hover(
91 self, selector: QueryType | None = None, offset: Offset = Offset()
92 ) -> None:
93 """Simulate hovering with the mouse cursor.
94
95 Args:
96 selector: The widget that should be hovered. If None, then the click
97 will occur relative to the screen. Note that this simply causes
98 a hover to occur at the location of the widget. If the widget is
99 currently hidden or obscured by another widget, then the hover may
100 not land on it.
101 offset: The offset to hover over within the selected widget.
102 """
103 app = self.app
104 screen = app.screen
105 if selector is not None:
106 target_widget = screen.query_one(selector)
107 else:
108 target_widget = screen
109
110 message_arguments = _get_mouse_message_arguments(
111 target_widget, offset, button=0
112 )
113 app.post_message(MouseMove(**message_arguments))
114 await self.pause()
115
116 async def pause(self, delay: float | None = None) -> None:
117 """Insert a pause.
118
119 Args:
120 delay: Seconds to pause, or None to wait for cpu idle.
121 """
122 # These sleep zeros, are to force asyncio to give up a time-slice,
123 if delay is None:
124 await wait_for_idle(0)
125 else:
126 await asyncio.sleep(delay)
127
128 async def wait_for_animation(self) -> None:
129 """Wait for any current animation to complete."""
130 await self._app.animator.wait_for_idle()
131
132 async def wait_for_scheduled_animations(self) -> None:
133 """Wait for any current and scheduled animations to complete."""
134 await self._app.animator.wait_until_complete()
135 await wait_for_idle()
136
137 async def exit(self, result: ReturnType) -> None:
138 """Exit the app with the given result.
139
140 Args:
141 result: The app result returned by `run` or `run_async`.
142 """
143 await wait_for_idle()
144 self.app.exit(result)
145
[end of src/textual/pilot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/textual/pilot.py b/src/textual/pilot.py
--- a/src/textual/pilot.py
+++ b/src/textual/pilot.py
@@ -14,20 +14,24 @@
def _get_mouse_message_arguments(
- target: Widget, offset: Offset = Offset(), button: int = 0
+ target: Widget,
+ offset: Offset = Offset(),
+ button: int = 0,
+ shift: bool = False,
+ meta: bool = False,
+ control: bool = False,
) -> dict[str, Any]:
"""Get the arguments to pass into mouse messages for the click and hover methods."""
- x, y = offset
click_x, click_y, _, _ = target.region.translate(offset)
message_arguments = {
- "x": x,
- "y": y,
+ "x": click_x,
+ "y": click_y,
"delta_x": 0,
"delta_y": 0,
"button": button,
- "shift": False,
- "meta": False,
- "ctrl": False,
+ "shift": shift,
+ "meta": meta,
+ "ctrl": control,
"screen_x": click_x,
"screen_y": click_y,
}
@@ -60,7 +64,12 @@
await self._app._press_keys(keys)
async def click(
- self, selector: QueryType | None = None, offset: Offset = Offset()
+ self,
+ selector: QueryType | None = None,
+ offset: Offset = Offset(),
+ shift: bool = False,
+ meta: bool = False,
+ control: bool = False,
) -> None:
"""Simulate clicking with the mouse.
@@ -71,6 +80,9 @@
currently hidden or obscured by another widget, then the click may
not land on it.
offset: The offset to click within the selected widget.
+ shift: Click with the shift key held down.
+ meta: Click with the meta key held down.
+ control: Click with the control key held down.
"""
app = self.app
screen = app.screen
@@ -80,7 +92,7 @@
target_widget = screen
message_arguments = _get_mouse_message_arguments(
- target_widget, offset, button=1
+ target_widget, offset, button=1, shift=shift, meta=meta, control=control
)
app.post_message(MouseDown(**message_arguments))
app.post_message(MouseUp(**message_arguments))
| {"golden_diff": "diff --git a/src/textual/pilot.py b/src/textual/pilot.py\n--- a/src/textual/pilot.py\n+++ b/src/textual/pilot.py\n@@ -14,20 +14,24 @@\n \n \n def _get_mouse_message_arguments(\n- target: Widget, offset: Offset = Offset(), button: int = 0\n+ target: Widget,\n+ offset: Offset = Offset(),\n+ button: int = 0,\n+ shift: bool = False,\n+ meta: bool = False,\n+ control: bool = False,\n ) -> dict[str, Any]:\n \"\"\"Get the arguments to pass into mouse messages for the click and hover methods.\"\"\"\n- x, y = offset\n click_x, click_y, _, _ = target.region.translate(offset)\n message_arguments = {\n- \"x\": x,\n- \"y\": y,\n+ \"x\": click_x,\n+ \"y\": click_y,\n \"delta_x\": 0,\n \"delta_y\": 0,\n \"button\": button,\n- \"shift\": False,\n- \"meta\": False,\n- \"ctrl\": False,\n+ \"shift\": shift,\n+ \"meta\": meta,\n+ \"ctrl\": control,\n \"screen_x\": click_x,\n \"screen_y\": click_y,\n }\n@@ -60,7 +64,12 @@\n await self._app._press_keys(keys)\n \n async def click(\n- self, selector: QueryType | None = None, offset: Offset = Offset()\n+ self,\n+ selector: QueryType | None = None,\n+ offset: Offset = Offset(),\n+ shift: bool = False,\n+ meta: bool = False,\n+ control: bool = False,\n ) -> None:\n \"\"\"Simulate clicking with the mouse.\n \n@@ -71,6 +80,9 @@\n currently hidden or obscured by another widget, then the click may\n not land on it.\n offset: The offset to click within the selected widget.\n+ shift: Click with the shift key held down.\n+ meta: Click with the meta key held down.\n+ control: Click with the control key held down.\n \"\"\"\n app = self.app\n screen = app.screen\n@@ -80,7 +92,7 @@\n target_widget = screen\n \n message_arguments = _get_mouse_message_arguments(\n- target_widget, offset, button=1\n+ target_widget, offset, button=1, shift=shift, meta=meta, control=control\n )\n app.post_message(MouseDown(**message_arguments))\n app.post_message(MouseUp(**message_arguments))\n", "issue": "Testing Using Pilot - `pilot.click()` Doesn't Have Effect On `Static` Widget\nHi,\r\n\r\nI am trying to write tests for the example calculator app present in the repo (at the same time, expanding it to add a few more operators). 
This is my `CalculatorApp` class:\r\n\r\n```python\r\nclass CalculatorApp(App):\r\n CSS_PATH = \"calculator.css\"\r\n\r\n viewport = var(\"0\")\r\n show_ac = var(True)\r\n left = var(Decimal(\"0\"))\r\n right = var(Decimal(\"0\"))\r\n value = var(\"\")\r\n operator = var(\"plus\")\r\n\r\n def watch_viewport(self, value: str) -> None:\r\n self.query_one(\"#viewport\", Static).update(value)\r\n\r\n def compute_show_ac(self) -> bool:\r\n return self.value in (\"\", \"0\") and self.viewport == \"0\"\r\n\r\n def watch_show_ac(self, show_ac: bool) -> None:\r\n self.query_one(\"#ac\").display = show_ac\r\n self.query_one(\"#c\").display = not show_ac\r\n\r\n def compose(self) -> ComposeResult:\r\n with Container(id=\"calculator\"):\r\n yield Static(id=\"viewport\")\r\n yield Button(\"AC\", id=\"ac\", variant=\"primary\")\r\n yield Button(\"C\", id=\"c\", variant=\"primary\")\r\n yield Button(\"+/-\", id=\"negation\", variant=\"primary\")\r\n yield Button(\"%\", id=\"percent\", variant=\"primary\")\r\n yield Button(\"sin(x)\", id=\"sine\", variant=\"warning\")\r\n yield Button(\"cos(x)\", id=\"cosine\", variant=\"warning\")\r\n yield Button(\"7\", id=\"number-7\", variant=\"primary\")\r\n yield Button(\"8\", id=\"number-8\", variant=\"primary\")\r\n yield Button(\"9\", id=\"number-9\", variant=\"primary\")\r\n yield Button(\"+\", id=\"plus\", variant=\"warning\")\r\n yield Button(\"x^y\", id=\"exponent\", variant=\"warning\")\r\n yield Button(\"4\", id=\"number-4\", variant=\"primary\")\r\n yield Button(\"5\", id=\"number-5\", variant=\"primary\")\r\n yield Button(\"6\", id=\"number-6\", variant=\"primary\")\r\n yield Button(\"-\", id=\"minus\", variant=\"warning\")\r\n yield Button(\"ln(x)\", id=\"logarithm\", variant=\"warning\")\r\n yield Button(\"1\", id=\"number-1\", variant=\"primary\")\r\n yield Button(\"2\", id=\"number-2\", variant=\"primary\")\r\n yield Button(\"3\", id=\"number-3\", variant=\"primary\")\r\n yield Button(\"*\", id=\"multiply\", variant=\"warning\")\r\n yield Button(\"x!\", id=\"factorial\", variant=\"warning\")\r\n yield Button(\"0\", id=\"number-0\", variant=\"primary\")\r\n yield Button(\".\", id=\"point\", variant=\"primary\")\r\n yield Button(\"\u00f7\", id=\"divide\", variant=\"warning\")\r\n yield Button(\"=\", id=\"equals\", variant=\"warning\")\r\n\r\n def on_button_pressed(self, event: Button.Pressed) -> None:\r\n button_id = event.button.id\r\n\r\n assert button_id is not None\r\n\r\n if button_id.startswith(\"number-\"):\r\n number = button_id.split(\"-\")[-1]\r\n self.viewport = self.value = self.value.lstrip(\"0\") + number\r\n```\r\n\r\nI wrote the following test to check that clicking the number buttons results in the calculator's display (`Static(id=\"viewport\")`) accumulating digits to make a number:\r\n\r\n```python\r\nasync def test_number_buttons():\r\n async with CalculatorApp().run_test() as pilot:\r\n app = pilot.app\r\n\r\n await pilot.click(\"#number-1\")\r\n\r\n display_content = app.query_one(\"#viewport\").render()\r\n assert str(display_content) == \"1\"\r\n\r\n await pilot.click(\"#number-2\")\r\n\r\n display_content = app.query_one(\"#viewport\").render()\r\n assert str(display_content) == \"12\"\r\n\r\n await pilot.click(\"#number-3\")\r\n\r\n display_content = app.query_one(\"#viewport\").render()\r\n assert str(display_content) == \"123\"\r\n```\r\n\r\nWhile the GUI gets updated correctly on clicking the buttons, the test always fails since `app.query_one(\"#viewport\").render()` always returns `\"0\"`. 
I've also tried replacing `app.query_one(\"#viewport\").render()` with `app.query_one(\"#viewport\", Static).render()` but that hasn't helped either.\r\n\r\nIs this supposed to happen?\r\n\r\n# Textual Diagnostics\r\n\r\n## Versions\r\n\r\n| Name | Value |\r\n|---------|--------|\r\n| Textual | 0.14.0 |\r\n| Rich | 13.3.2 |\r\n\r\n## Python\r\n\r\n| Name | Value |\r\n|----------------|-----------------------------------------------|\r\n| Version | 3.10.9 |\r\n| Implementation | CPython |\r\n| Compiler | GCC 11.2.0 |\r\n| Executable | /home/malay_agr/anaconda3/envs/spe/bin/python |\r\n\r\n## Operating System\r\n\r\n| Name | Value |\r\n|---------|---------------------------------------------|\r\n| System | Linux |\r\n| Release | 5.15.0-52-generic |\r\n| Version | #58-Ubuntu SMP Thu Oct 13 08:03:55 UTC 2022 |\r\n\r\n## Terminal\r\n\r\n| Name | Value |\r\n|----------------------|-----------------|\r\n| Terminal Application | vscode (1.76.1) |\r\n| TERM | xterm-256color |\r\n| COLORTERM | truecolor |\r\n| FORCE_COLOR | *Not set* |\r\n| NO_COLOR | *Not set* |\r\n\r\n## Rich Console options\r\n\r\n| Name | Value |\r\n|----------------|----------------------|\r\n| size | width=197, height=19 |\r\n| legacy_windows | False |\r\n| min_width | 1 |\r\n| max_width | 197 |\r\n| is_terminal | True |\r\n| encoding | utf-8 |\r\n| max_height | 19 |\r\n| justify | None |\r\n| overflow | None |\r\n| no_wrap | False |\r\n| highlight | None |\r\n| markup | None |\r\n| height | None |\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport asyncio\nfrom typing import Any, Generic\n\nimport rich.repr\n\nfrom ._wait import wait_for_idle\nfrom .app import App, ReturnType\nfrom .css.query import QueryType\nfrom .events import Click, MouseDown, MouseMove, MouseUp\nfrom .geometry import Offset\nfrom .widget import Widget\n\n\ndef _get_mouse_message_arguments(\n target: Widget, offset: Offset = Offset(), button: int = 0\n) -> dict[str, Any]:\n \"\"\"Get the arguments to pass into mouse messages for the click and hover methods.\"\"\"\n x, y = offset\n click_x, click_y, _, _ = target.region.translate(offset)\n message_arguments = {\n \"x\": x,\n \"y\": y,\n \"delta_x\": 0,\n \"delta_y\": 0,\n \"button\": button,\n \"shift\": False,\n \"meta\": False,\n \"ctrl\": False,\n \"screen_x\": click_x,\n \"screen_y\": click_y,\n }\n return message_arguments\n\n\[email protected](angular=True)\nclass Pilot(Generic[ReturnType]):\n \"\"\"Pilot object to drive an app.\"\"\"\n\n def __init__(self, app: App[ReturnType]) -> None:\n self._app = app\n\n def __rich_repr__(self) -> rich.repr.Result:\n yield \"app\", self._app\n\n @property\n def app(self) -> App[ReturnType]:\n \"\"\"App: A reference to the application.\"\"\"\n return self._app\n\n async def press(self, *keys: str) -> None:\n \"\"\"Simulate key-presses.\n\n Args:\n *keys: Keys to press.\n\n \"\"\"\n if keys:\n await self._app._press_keys(keys)\n\n async def click(\n self, selector: QueryType | None = None, offset: Offset = Offset()\n ) -> None:\n \"\"\"Simulate clicking with the mouse.\n\n Args:\n selector: The widget that should be clicked. If None, then the click\n will occur relative to the screen. Note that this simply causes\n a click to occur at the location of the widget. 
If the widget is\n currently hidden or obscured by another widget, then the click may\n not land on it.\n offset: The offset to click within the selected widget.\n \"\"\"\n app = self.app\n screen = app.screen\n if selector is not None:\n target_widget = screen.query_one(selector)\n else:\n target_widget = screen\n\n message_arguments = _get_mouse_message_arguments(\n target_widget, offset, button=1\n )\n app.post_message(MouseDown(**message_arguments))\n app.post_message(MouseUp(**message_arguments))\n app.post_message(Click(**message_arguments))\n await self.pause()\n\n async def hover(\n self, selector: QueryType | None = None, offset: Offset = Offset()\n ) -> None:\n \"\"\"Simulate hovering with the mouse cursor.\n\n Args:\n selector: The widget that should be hovered. If None, then the click\n will occur relative to the screen. Note that this simply causes\n a hover to occur at the location of the widget. If the widget is\n currently hidden or obscured by another widget, then the hover may\n not land on it.\n offset: The offset to hover over within the selected widget.\n \"\"\"\n app = self.app\n screen = app.screen\n if selector is not None:\n target_widget = screen.query_one(selector)\n else:\n target_widget = screen\n\n message_arguments = _get_mouse_message_arguments(\n target_widget, offset, button=0\n )\n app.post_message(MouseMove(**message_arguments))\n await self.pause()\n\n async def pause(self, delay: float | None = None) -> None:\n \"\"\"Insert a pause.\n\n Args:\n delay: Seconds to pause, or None to wait for cpu idle.\n \"\"\"\n # These sleep zeros, are to force asyncio to give up a time-slice,\n if delay is None:\n await wait_for_idle(0)\n else:\n await asyncio.sleep(delay)\n\n async def wait_for_animation(self) -> None:\n \"\"\"Wait for any current animation to complete.\"\"\"\n await self._app.animator.wait_for_idle()\n\n async def wait_for_scheduled_animations(self) -> None:\n \"\"\"Wait for any current and scheduled animations to complete.\"\"\"\n await self._app.animator.wait_until_complete()\n await wait_for_idle()\n\n async def exit(self, result: ReturnType) -> None:\n \"\"\"Exit the app with the given result.\n\n Args:\n result: The app result returned by `run` or `run_async`.\n \"\"\"\n await wait_for_idle()\n self.app.exit(result)\n", "path": "src/textual/pilot.py"}]} | 3,206 | 576 |
gh_patches_debug_36083 | rasdani/github-patches | git_diff | microsoft__Qcodes-771 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Data mingled across columns in dataset
### Steps to reproduce
1. Measure something with `.run(use_threads=True)`
### Expected behaviour
Get a neat dataset
### Actual behaviour
Data mingles across columns in dataset 💘
### System
**operating system**
Win 10
**qcodes branch**
Master
Two identical measurements:

</issue>
<code>
[start of qcodes/actions.py]
1 """Actions, mainly to be executed in measurement Loops."""
2 import time
3
4 from qcodes.utils.deferred_operations import is_function
5 from qcodes.utils.threading import thread_map
6
7
8 _NO_SNAPSHOT = {'type': None, 'description': 'Action without snapshot'}
9
10
11 def _actions_snapshot(actions, update):
12 """Make a list of snapshots from a list of actions."""
13 snapshot = []
14 for action in actions:
15 if hasattr(action, 'snapshot'):
16 snapshot.append(action.snapshot(update=update))
17 else:
18 snapshot.append(_NO_SNAPSHOT)
19 return snapshot
20
21
22 class Task:
23 """
24 A predefined task to be executed within a measurement Loop.
25
26 The first argument should be a callable, to which any subsequent
27 args and kwargs (which are evaluated before the loop starts) are passed.
28
29 The args and kwargs are first evaluated if they are found to be callable.
30
31 Keyword Args passed when the Task is called are ignored,
32 but are accepted for compatibility with other things happening in a Loop.
33
34 Args:
35 func (callable): Function to executed
36 *args: pass to func, after evaluation if callable
37 **kwargs: pass to func, after evaluation if callable
38
39 """
40 def __init__(self, func, *args, **kwargs):
41 self.func = func
42 self.args = args
43 self.kwargs = kwargs
44
45 def __call__(self, **ignore_kwargs):
46 # If any of the arguments are callable, evaluate them first
47 eval_args = [arg() if callable(arg) else arg for arg in self.args]
48 eval_kwargs = {k: (v() if callable(v) else v) for k, v in self.kwargs.items()}
49
50 self.func(*eval_args, **eval_kwargs)
51
52 def snapshot(self, update=False):
53 """
54 Snapshots task
55 Args:
56 update (bool): TODO not in use
57
58 Returns:
59 dict: snapshot
60 """
61 return {'type': 'Task', 'func': repr(self.func)}
62
63
64 class Wait:
65 """
66 A simple class to tell a Loop to wait <delay> seconds.
67
68 This is transformed into a Task within the Loop, such that
69 it can do other things (monitor, check for halt) during the delay.
70
71 But for use outside of a Loop, it is also callable (then it just sleeps)
72
73 Args:
74 delay: seconds to delay
75
76 Raises:
77 ValueError: if delay is negative
78 """
79 def __init__(self, delay):
80 if not delay >= 0:
81 raise ValueError('delay must be > 0, not {}'.format(repr(delay)))
82 self.delay = delay
83
84 def __call__(self):
85 if self.delay:
86 time.sleep(self.delay)
87
88 def snapshot(self, update=False):
89 """
90 Snapshots delay
91 Args:
92 update (bool): TODO not in use
93
94 Returns:
95 dict: snapshot
96 """
97 return {'type': 'Wait', 'delay': self.delay}
98
99
100 class _Measure:
101 """
102 A callable collection of parameters to measure.
103
104 This should not be constructed manually, only by an ActiveLoop.
105 """
106 def __init__(self, params_indices, data_set, use_threads):
107 self.use_threads = use_threads and len(params_indices) > 1
108 # the applicable DataSet.store function
109 self.store = data_set.store
110
111 # for performance, pre-calculate which params return data for
112 # multiple arrays, and the name mappings
113 self.getters = []
114 self.param_ids = []
115 self.composite = []
116 for param, action_indices in params_indices:
117 self.getters.append(param.get)
118
119 if hasattr(param, 'names'):
120 part_ids = []
121 for i in range(len(param.names)):
122 param_id = data_set.action_id_map[action_indices + (i,)]
123 part_ids.append(param_id)
124 self.param_ids.append(None)
125 self.composite.append(part_ids)
126 else:
127 param_id = data_set.action_id_map[action_indices]
128 self.param_ids.append(param_id)
129 self.composite.append(False)
130
131 def __call__(self, loop_indices, **ignore_kwargs):
132 out_dict = {}
133 if self.use_threads:
134 out = thread_map(self.getters)
135 else:
136 out = [g() for g in self.getters]
137
138 for param_out, param_id, composite in zip(out, self.param_ids,
139 self.composite):
140 if composite:
141 for val, part_id in zip(param_out, composite):
142 out_dict[part_id] = val
143 else:
144 out_dict[param_id] = param_out
145
146 self.store(loop_indices, out_dict)
147
148
149 class _Nest:
150
151 """
152 Wrapper to make a callable nested ActiveLoop.
153
154 This should not be constructed manually, only by an ActiveLoop.
155 """
156
157 def __init__(self, inner_loop, action_indices):
158 self.inner_loop = inner_loop
159 self.action_indices = action_indices
160
161 def __call__(self, **kwargs):
162 self.inner_loop._run_loop(action_indices=self.action_indices, **kwargs)
163
164
165 class BreakIf:
166
167 """
168 Loop action that breaks out of the loop if a condition is truthy.
169
170 Args:
171 condition (callable): a callable taking no arguments.
172 Can be a simple function that returns truthy when it's time to quit
173 May also be constructed by deferred operations on `Parameter`.
174 Raises:
175 TypeError: if condition is not a callable with no aguments.
176
177 Examples:
178 >>> BreakIf(gates.chan1 >= 3)
179 >>> BreakIf(abs(source.I * source.V) >= source.power_limit.get_latest)
180 """
181
182 def __init__(self, condition):
183 if not is_function(condition, 0):
184 raise TypeError('BreakIf condition must be a callable with '
185 'no arguments')
186 self.condition = condition
187
188 def __call__(self, **ignore_kwargs):
189 if self.condition():
190 raise _QcodesBreak
191
192 def snapshot(self, update=False):
193 # TODO: make nice reprs for DeferredOperations
194 """
195 Snapshots breakif action
196 Args:
197 update (bool): TODO not in use
198
199 Returns:
200 dict: snapshot
201
202 """
203 return {'type': 'BreakIf', 'condition': repr(self.condition)}
204
205
206 class _QcodesBreak(Exception):
207 pass
208
[end of qcodes/actions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/qcodes/actions.py b/qcodes/actions.py
--- a/qcodes/actions.py
+++ b/qcodes/actions.py
@@ -8,6 +8,12 @@
_NO_SNAPSHOT = {'type': None, 'description': 'Action without snapshot'}
+# exception when threading is attempted used to simultaneously
+# query the same instrument for several values
+class UnsafeThreadingException(Exception):
+ pass
+
+
def _actions_snapshot(actions, update):
"""Make a list of snapshots from a list of actions."""
snapshot = []
@@ -113,9 +119,13 @@
self.getters = []
self.param_ids = []
self.composite = []
+ paramcheck = [] # list to check if parameters are unique
for param, action_indices in params_indices:
self.getters.append(param.get)
+ if param._instrument:
+ paramcheck.append((param, param._instrument))
+
if hasattr(param, 'names'):
part_ids = []
for i in range(len(param.names)):
@@ -128,6 +138,17 @@
self.param_ids.append(param_id)
self.composite.append(False)
+ if self.use_threads:
+ insts = [p[1] for p in paramcheck]
+ if (len(set(insts)) != len(insts)):
+ duplicates = [p for p in paramcheck if insts.count(p[1]) > 1]
+ raise UnsafeThreadingException('Can not use threading to '
+ 'read '
+ 'several things from the same '
+ 'instrument. Specifically, you '
+ 'asked for'
+ ' {}.'.format(duplicates))
+
def __call__(self, loop_indices, **ignore_kwargs):
out_dict = {}
if self.use_threads:
@@ -166,7 +187,7 @@
"""
Loop action that breaks out of the loop if a condition is truthy.
-
+
Args:
condition (callable): a callable taking no arguments.
Can be a simple function that returns truthy when it's time to quit
| {"golden_diff": "diff --git a/qcodes/actions.py b/qcodes/actions.py\n--- a/qcodes/actions.py\n+++ b/qcodes/actions.py\n@@ -8,6 +8,12 @@\n _NO_SNAPSHOT = {'type': None, 'description': 'Action without snapshot'}\n \n \n+# exception when threading is attempted used to simultaneously\n+# query the same instrument for several values\n+class UnsafeThreadingException(Exception):\n+ pass\n+\n+\n def _actions_snapshot(actions, update):\n \"\"\"Make a list of snapshots from a list of actions.\"\"\"\n snapshot = []\n@@ -113,9 +119,13 @@\n self.getters = []\n self.param_ids = []\n self.composite = []\n+ paramcheck = [] # list to check if parameters are unique\n for param, action_indices in params_indices:\n self.getters.append(param.get)\n \n+ if param._instrument:\n+ paramcheck.append((param, param._instrument))\n+\n if hasattr(param, 'names'):\n part_ids = []\n for i in range(len(param.names)):\n@@ -128,6 +138,17 @@\n self.param_ids.append(param_id)\n self.composite.append(False)\n \n+ if self.use_threads:\n+ insts = [p[1] for p in paramcheck]\n+ if (len(set(insts)) != len(insts)):\n+ duplicates = [p for p in paramcheck if insts.count(p[1]) > 1]\n+ raise UnsafeThreadingException('Can not use threading to '\n+ 'read '\n+ 'several things from the same '\n+ 'instrument. Specifically, you '\n+ 'asked for'\n+ ' {}.'.format(duplicates))\n+\n def __call__(self, loop_indices, **ignore_kwargs):\n out_dict = {}\n if self.use_threads:\n@@ -166,7 +187,7 @@\n \n \"\"\"\n Loop action that breaks out of the loop if a condition is truthy.\n- \n+\n Args:\n condition (callable): a callable taking no arguments.\n Can be a simple function that returns truthy when it's time to quit\n", "issue": "Data mingled across columns in dataset\n### Steps to reproduce\r\n1. Measure something with `.run(use_threads=True)`\r\n\r\n### Expected behaviour\r\nGet a neat dataset\r\n\r\n### Actual behaviour\r\nData mingles across columns in dataset \ud83d\udc98 \r\n\r\n### System\r\n**operating system**\r\nWin 10\r\n\r\n**qcodes branch**\r\nMaster\r\n\r\nTwo identical measurements:\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"Actions, mainly to be executed in measurement Loops.\"\"\"\nimport time\n\nfrom qcodes.utils.deferred_operations import is_function\nfrom qcodes.utils.threading import thread_map\n\n\n_NO_SNAPSHOT = {'type': None, 'description': 'Action without snapshot'}\n\n\ndef _actions_snapshot(actions, update):\n \"\"\"Make a list of snapshots from a list of actions.\"\"\"\n snapshot = []\n for action in actions:\n if hasattr(action, 'snapshot'):\n snapshot.append(action.snapshot(update=update))\n else:\n snapshot.append(_NO_SNAPSHOT)\n return snapshot\n\n\nclass Task:\n \"\"\"\n A predefined task to be executed within a measurement Loop.\n\n The first argument should be a callable, to which any subsequent\n args and kwargs (which are evaluated before the loop starts) are passed.\n\n The args and kwargs are first evaluated if they are found to be callable.\n\n Keyword Args passed when the Task is called are ignored,\n but are accepted for compatibility with other things happening in a Loop.\n\n Args:\n func (callable): Function to executed\n *args: pass to func, after evaluation if callable\n **kwargs: pass to func, after evaluation if callable\n\n \"\"\"\n def __init__(self, func, *args, **kwargs):\n self.func = func\n self.args = args\n self.kwargs = kwargs\n\n def __call__(self, **ignore_kwargs):\n # If any of the arguments are callable, evaluate them first\n eval_args = [arg() if callable(arg) else arg for arg in 
self.args]\n eval_kwargs = {k: (v() if callable(v) else v) for k, v in self.kwargs.items()}\n\n self.func(*eval_args, **eval_kwargs)\n\n def snapshot(self, update=False):\n \"\"\"\n Snapshots task\n Args:\n update (bool): TODO not in use\n\n Returns:\n dict: snapshot\n \"\"\"\n return {'type': 'Task', 'func': repr(self.func)}\n\n\nclass Wait:\n \"\"\"\n A simple class to tell a Loop to wait <delay> seconds.\n\n This is transformed into a Task within the Loop, such that\n it can do other things (monitor, check for halt) during the delay.\n\n But for use outside of a Loop, it is also callable (then it just sleeps)\n\n Args:\n delay: seconds to delay\n\n Raises:\n ValueError: if delay is negative\n \"\"\"\n def __init__(self, delay):\n if not delay >= 0:\n raise ValueError('delay must be > 0, not {}'.format(repr(delay)))\n self.delay = delay\n\n def __call__(self):\n if self.delay:\n time.sleep(self.delay)\n\n def snapshot(self, update=False):\n \"\"\"\n Snapshots delay\n Args:\n update (bool): TODO not in use\n\n Returns:\n dict: snapshot\n \"\"\"\n return {'type': 'Wait', 'delay': self.delay}\n\n\nclass _Measure:\n \"\"\"\n A callable collection of parameters to measure.\n\n This should not be constructed manually, only by an ActiveLoop.\n \"\"\"\n def __init__(self, params_indices, data_set, use_threads):\n self.use_threads = use_threads and len(params_indices) > 1\n # the applicable DataSet.store function\n self.store = data_set.store\n\n # for performance, pre-calculate which params return data for\n # multiple arrays, and the name mappings\n self.getters = []\n self.param_ids = []\n self.composite = []\n for param, action_indices in params_indices:\n self.getters.append(param.get)\n\n if hasattr(param, 'names'):\n part_ids = []\n for i in range(len(param.names)):\n param_id = data_set.action_id_map[action_indices + (i,)]\n part_ids.append(param_id)\n self.param_ids.append(None)\n self.composite.append(part_ids)\n else:\n param_id = data_set.action_id_map[action_indices]\n self.param_ids.append(param_id)\n self.composite.append(False)\n\n def __call__(self, loop_indices, **ignore_kwargs):\n out_dict = {}\n if self.use_threads:\n out = thread_map(self.getters)\n else:\n out = [g() for g in self.getters]\n\n for param_out, param_id, composite in zip(out, self.param_ids,\n self.composite):\n if composite:\n for val, part_id in zip(param_out, composite):\n out_dict[part_id] = val\n else:\n out_dict[param_id] = param_out\n\n self.store(loop_indices, out_dict)\n\n\nclass _Nest:\n\n \"\"\"\n Wrapper to make a callable nested ActiveLoop.\n\n This should not be constructed manually, only by an ActiveLoop.\n \"\"\"\n\n def __init__(self, inner_loop, action_indices):\n self.inner_loop = inner_loop\n self.action_indices = action_indices\n\n def __call__(self, **kwargs):\n self.inner_loop._run_loop(action_indices=self.action_indices, **kwargs)\n\n\nclass BreakIf:\n\n \"\"\"\n Loop action that breaks out of the loop if a condition is truthy.\n \n Args:\n condition (callable): a callable taking no arguments.\n Can be a simple function that returns truthy when it's time to quit\n May also be constructed by deferred operations on `Parameter`.\n Raises:\n TypeError: if condition is not a callable with no aguments.\n\n Examples:\n >>> BreakIf(gates.chan1 >= 3)\n >>> BreakIf(abs(source.I * source.V) >= source.power_limit.get_latest)\n \"\"\"\n\n def __init__(self, condition):\n if not is_function(condition, 0):\n raise TypeError('BreakIf condition must be a callable with '\n 'no arguments')\n self.condition 
= condition\n\n def __call__(self, **ignore_kwargs):\n if self.condition():\n raise _QcodesBreak\n\n def snapshot(self, update=False):\n # TODO: make nice reprs for DeferredOperations\n \"\"\"\n Snapshots breakif action\n Args:\n update (bool): TODO not in use\n\n Returns:\n dict: snapshot\n\n \"\"\"\n return {'type': 'BreakIf', 'condition': repr(self.condition)}\n\n\nclass _QcodesBreak(Exception):\n pass\n", "path": "qcodes/actions.py"}]} | 2,556 | 464 |
gh_patches_debug_28691 | rasdani/github-patches | git_diff | elastic__apm-agent-python-998 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
5.10.0 "kind of" regression for elasticsearch instrumentation due to logging of whole body
**Describe the bug**:
The elasticsearch library makes it possible to define your own custom serializer. However, the Python agent doesn't take this into account, which can lead to the wrong request bodies being collected and can even crash the agent outright (which is unfortunately what happened to us).
The problem seems to have happened with #940.
**To Reproduce - wrong body**
https://gist.github.com/HenrikOssipoff/a231b45ce504691cd71a5de23a771132
This gist shows a simple Starlette application with a custom serializer.
Using the code, the following document is correctly indexed:
```json
{"_index":"test-index","_type":"_doc","_id":"zrmN0nYBBrB60DkP7BVZ","_score":1.0,"_source":{"numbers":2}}
```
However, the wrong body is recorded with the agent:

**To Reproduce - crash**
The following gist shows an application that works perfectly fine with the elasticsearch client, but crashes the apm agent:
https://gist.github.com/HenrikOssipoff/2219d059d3156ab8b11879e1191cefe4
Causes exception `TypeError: keys must be str, int, float, bool or None, not NumberObj`.
I'll admit this is some weird code; this is a simplified version of what we currently use.
The version in our app uses Django's lazy translation strings as keys for certain parts of the query; these correspond to `django.utils.functional.Promise` objects. We're basically looping over them to force-evaluate them, since they're lazy objects.
It works perfectly fine, but crashes the agent.
**Environment (please complete the following information)**
- OS: [e.g. Linux] Linux/Mac
- Python version: 3.9
- Framework and version [e.g. Django 2.1]: Starlette, Django (any version, probably)
- APM Server version: 7.8.0
- Agent version: >= 5.10.0
**Additional context**
I'm not actually sure how best to solve this. The elasticsearch library already supports a wide array of custom serialization, even without using your own custom one: https://github.com/elastic/elasticsearch-py/blob/master/elasticsearch/serializer.py
The best case (from a user's perspective) would be for the agent to somehow find the correct serializer to use when dealing with the body. I'm not sure if this is even possible.
</issue>
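Editor's sketch (not part of the original issue): a minimal reproduction of the crash path described above, assuming only that the request body is a dict whose keys are non-primitive objects. `LazyKey` is a made-up stand-in for the reporter's `django.utils.functional.Promise` keys; the point is that `json.dumps(..., default=...)` applies its fallback to unencodable *values*, never to dict *keys*, so the agent's capture call raises.

```python
import json


class LazyKey:
    """Hypothetical stand-in for a lazy/custom object used as a dict key."""
    def __init__(self, text):
        self.text = text


body = {LazyKey("numbers"): 2}  # fine for a custom ES serializer, fatal for json.dumps

try:
    # Mirrors the agent's capture call, json.dumps(body, default=compat.text_type):
    # the default hook never runs for dict keys, so this raises TypeError.
    json.dumps(body, default=str)
except TypeError as exc:
    print(exc)  # e.g. "keys must be str, int, float, bool or None, not LazyKey"
```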
<code>
[start of elasticapm/instrumentation/packages/elasticsearch.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 from __future__ import absolute_import
32
33 import json
34
35 import elasticapm
36 from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule
37 from elasticapm.utils import compat
38 from elasticapm.utils.logging import get_logger
39
40 logger = get_logger("elasticapm.instrument")
41
42
43 API_METHOD_KEY_NAME = "__elastic_apm_api_method_name"
44 BODY_REF_NAME = "__elastic_apm_body_ref"
45
46
47 class ElasticSearchConnectionMixin(object):
48 query_methods = ("search", "count", "delete_by_query")
49
50 def get_signature(self, args, kwargs):
51 args_len = len(args)
52 http_method = args[0] if args_len else kwargs.get("method")
53 http_path = args[1] if args_len > 1 else kwargs.get("url")
54
55 return "ES %s %s" % (http_method, http_path)
56
57 def get_context(self, instance, args, kwargs):
58 args_len = len(args)
59 params = args[2] if args_len > 2 else kwargs.get("params")
60 body = params.pop(BODY_REF_NAME, None) if params else None
61
62 api_method = params.pop(API_METHOD_KEY_NAME, None) if params else None
63 context = {"db": {"type": "elasticsearch"}}
64 if api_method in self.query_methods:
65 query = []
66 # using both q AND body is allowed in some API endpoints / ES versions,
67 # but not in others. We simply capture both if they are there so the
68 # user can see it.
69 if params and "q" in params:
70 # 'q' is already encoded to a byte string at this point
71 # we assume utf8, which is the default
72 query.append("q=" + params["q"].decode("utf-8", errors="replace"))
73 if body and isinstance(body, dict):
74 query.append(json.dumps(body, default=compat.text_type))
75 if query:
76 context["db"]["statement"] = "\n\n".join(query)
77 elif api_method == "update":
78 if isinstance(body, dict) and "script" in body:
79 # only get the `script` field from the body
80 context["db"]["statement"] = json.dumps({"script": body["script"]})
81 context["destination"] = {
82 "address": instance.host,
83 "service": {"name": "elasticsearch", "resource": "elasticsearch", "type": "db"},
84 }
85 return context
86
87
88 class ElasticsearchConnectionInstrumentation(ElasticSearchConnectionMixin, AbstractInstrumentedModule):
89 name = "elasticsearch_connection"
90
91 instrument_list = [
92 ("elasticsearch.connection.http_urllib3", "Urllib3HttpConnection.perform_request"),
93 ("elasticsearch.connection.http_requests", "RequestsHttpConnection.perform_request"),
94 ]
95
96 def call(self, module, method, wrapped, instance, args, kwargs):
97 signature = self.get_signature(args, kwargs)
98 context = self.get_context(instance, args, kwargs)
99
100 with elasticapm.capture_span(
101 signature,
102 span_type="db",
103 span_subtype="elasticsearch",
104 span_action="query",
105 extra=context,
106 skip_frames=2,
107 leaf=True,
108 ):
109 return wrapped(*args, **kwargs)
110
111
112 class ElasticsearchInstrumentation(AbstractInstrumentedModule):
113 name = "elasticsearch"
114
115 instrument_list = [
116 ("elasticsearch.client", "Elasticsearch.delete_by_query"),
117 ("elasticsearch.client", "Elasticsearch.search"),
118 ("elasticsearch.client", "Elasticsearch.count"),
119 ("elasticsearch.client", "Elasticsearch.update"),
120 ]
121
122 def __init__(self):
123 super(ElasticsearchInstrumentation, self).__init__()
124 try:
125 from elasticsearch import VERSION
126
127 self.version = VERSION[0]
128 except ImportError:
129 self.version = None
130
131 def instrument(self):
132 if self.version and not 2 <= self.version < 8:
133 logger.debug("Instrumenting version %s of Elasticsearch is not supported by Elastic APM", self.version)
134 return
135 super(ElasticsearchInstrumentation, self).instrument()
136
137 def call(self, module, method, wrapped, instance, args, kwargs):
138 kwargs = self.inject_apm_params(method, kwargs)
139 return wrapped(*args, **kwargs)
140
141 def inject_apm_params(self, method, kwargs):
142 params = kwargs.pop("params", {})
143
144 # make a copy of params in case the caller reuses them for some reason
145 params = params.copy() if params is not None else {}
146
147 method_name = method.partition(".")[-1]
148
149 # store a reference to the non-serialized body so we can use it in the connection layer
150 body = kwargs.get("body")
151 params[BODY_REF_NAME] = body
152 params[API_METHOD_KEY_NAME] = method_name
153
154 kwargs["params"] = params
155 return kwargs
156
[end of elasticapm/instrumentation/packages/elasticsearch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticapm/instrumentation/packages/elasticsearch.py b/elasticapm/instrumentation/packages/elasticsearch.py
--- a/elasticapm/instrumentation/packages/elasticsearch.py
+++ b/elasticapm/instrumentation/packages/elasticsearch.py
@@ -58,6 +58,7 @@
args_len = len(args)
params = args[2] if args_len > 2 else kwargs.get("params")
body = params.pop(BODY_REF_NAME, None) if params else None
+ body_serialized = args[3] if args_len > 3 else kwargs.get("body")
api_method = params.pop(API_METHOD_KEY_NAME, None) if params else None
context = {"db": {"type": "elasticsearch"}}
@@ -70,8 +71,16 @@
# 'q' is already encoded to a byte string at this point
# we assume utf8, which is the default
query.append("q=" + params["q"].decode("utf-8", errors="replace"))
- if body and isinstance(body, dict):
- query.append(json.dumps(body, default=compat.text_type))
+ if body_serialized:
+ if isinstance(body_serialized, bytes):
+ query.append(body_serialized.decode("utf-8", errors="replace"))
+ else:
+ query.append(body_serialized)
+ elif body and isinstance(body, dict):
+ try:
+ query.append(json.dumps(body, default=compat.text_type))
+ except TypeError:
+ pass
if query:
context["db"]["statement"] = "\n\n".join(query)
elif api_method == "update":
| {"golden_diff": "diff --git a/elasticapm/instrumentation/packages/elasticsearch.py b/elasticapm/instrumentation/packages/elasticsearch.py\n--- a/elasticapm/instrumentation/packages/elasticsearch.py\n+++ b/elasticapm/instrumentation/packages/elasticsearch.py\n@@ -58,6 +58,7 @@\n args_len = len(args)\n params = args[2] if args_len > 2 else kwargs.get(\"params\")\n body = params.pop(BODY_REF_NAME, None) if params else None\n+ body_serialized = args[3] if args_len > 3 else kwargs.get(\"body\")\n \n api_method = params.pop(API_METHOD_KEY_NAME, None) if params else None\n context = {\"db\": {\"type\": \"elasticsearch\"}}\n@@ -70,8 +71,16 @@\n # 'q' is already encoded to a byte string at this point\n # we assume utf8, which is the default\n query.append(\"q=\" + params[\"q\"].decode(\"utf-8\", errors=\"replace\"))\n- if body and isinstance(body, dict):\n- query.append(json.dumps(body, default=compat.text_type))\n+ if body_serialized:\n+ if isinstance(body_serialized, bytes):\n+ query.append(body_serialized.decode(\"utf-8\", errors=\"replace\"))\n+ else:\n+ query.append(body_serialized)\n+ elif body and isinstance(body, dict):\n+ try:\n+ query.append(json.dumps(body, default=compat.text_type))\n+ except TypeError:\n+ pass\n if query:\n context[\"db\"][\"statement\"] = \"\\n\\n\".join(query)\n elif api_method == \"update\":\n", "issue": "5.10.0 \"kind of\" regression for elasticsearch instrumentation due to logging of whole body\n**Describe the bug**:\r\n\r\nThe elasticsearch library makes it possible to define your own custom serializer as part of the library. However, the Python agent doesn't take this into account, which can lead to wrong request bodies being collected, and it is possible to downright crash the agent (which unfortunately was what happened to us).\r\n\r\nThe problem seems to have happened with #940.\r\n\r\n**To Reproduce - wrong body**\r\n\r\nhttps://gist.github.com/HenrikOssipoff/a231b45ce504691cd71a5de23a771132\r\n\r\nThis gist shows a simple Starlette application with a custom serializer.\r\n\r\nUsing the code, the following document is correctly indexed:\r\n\r\n```json\r\n{\"_index\":\"test-index\",\"_type\":\"_doc\",\"_id\":\"zrmN0nYBBrB60DkP7BVZ\",\"_score\":1.0,\"_source\":{\"numbers\":2}}\r\n```\r\n\r\nHowever, the wrong body is recorded with the agent:\r\n\r\n\r\n\r\n**To Reproduce - crash**\r\n\r\nThe following gist shows an application that works perfectly fine with the elasticsearch client, but crashes the apm agent:\r\n\r\nhttps://gist.github.com/HenrikOssipoff/2219d059d3156ab8b11879e1191cefe4\r\n\r\nCauses exception `TypeError: keys must be str, int, float, bool or None, not NumberObj`.\r\n\r\nI'll admit this is some weird code; this is a simplified version of what we currently use.\r\nThe version in our app uses Django's translation module as keys for certain parts of the query, which corresponds to `django.utils.functional.Promise` objects. We're basically looping over these to force-evaluate them, as they're lazy objects.\r\n\r\nIt works perfectly fine, but crashes the agent.\r\n\r\n**Environment (please complete the following information)**\r\n- OS: [e.g. Linux] Linux/Mac\r\n- Python version: 3.9\r\n- Framework and version [e.g. Django 2.1]: Starlette, Django (any version, probably)\r\n- APM Server version: 7.8.0\r\n- Agent version: >= 5.10.0\r\n\r\n**Additional context**\r\n\r\nI'm not actually sure how best to solve this. 
The elasticsearch library already supports a wide array of custom serialization, even without using your own custom one: https://github.com/elastic/elasticsearch-py/blob/master/elasticsearch/serializer.py\r\n\r\nThe best case (from a user perspective), would be for the agent to somehow find the correct serializer to use, when dealing with the body. I'm not sure if this is even possible.\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom __future__ import absolute_import\n\nimport json\n\nimport elasticapm\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.utils import compat\nfrom elasticapm.utils.logging import get_logger\n\nlogger = get_logger(\"elasticapm.instrument\")\n\n\nAPI_METHOD_KEY_NAME = \"__elastic_apm_api_method_name\"\nBODY_REF_NAME = \"__elastic_apm_body_ref\"\n\n\nclass ElasticSearchConnectionMixin(object):\n query_methods = (\"search\", \"count\", \"delete_by_query\")\n\n def get_signature(self, args, kwargs):\n args_len = len(args)\n http_method = args[0] if args_len else kwargs.get(\"method\")\n http_path = args[1] if args_len > 1 else kwargs.get(\"url\")\n\n return \"ES %s %s\" % (http_method, http_path)\n\n def get_context(self, instance, args, kwargs):\n args_len = len(args)\n params = args[2] if args_len > 2 else kwargs.get(\"params\")\n body = params.pop(BODY_REF_NAME, None) if params else None\n\n api_method = params.pop(API_METHOD_KEY_NAME, None) if params else None\n context = {\"db\": {\"type\": \"elasticsearch\"}}\n if api_method in self.query_methods:\n query = []\n # using both q AND body is allowed in some API endpoints / ES versions,\n # but not in others. 
We simply capture both if they are there so the\n # user can see it.\n if params and \"q\" in params:\n # 'q' is already encoded to a byte string at this point\n # we assume utf8, which is the default\n query.append(\"q=\" + params[\"q\"].decode(\"utf-8\", errors=\"replace\"))\n if body and isinstance(body, dict):\n query.append(json.dumps(body, default=compat.text_type))\n if query:\n context[\"db\"][\"statement\"] = \"\\n\\n\".join(query)\n elif api_method == \"update\":\n if isinstance(body, dict) and \"script\" in body:\n # only get the `script` field from the body\n context[\"db\"][\"statement\"] = json.dumps({\"script\": body[\"script\"]})\n context[\"destination\"] = {\n \"address\": instance.host,\n \"service\": {\"name\": \"elasticsearch\", \"resource\": \"elasticsearch\", \"type\": \"db\"},\n }\n return context\n\n\nclass ElasticsearchConnectionInstrumentation(ElasticSearchConnectionMixin, AbstractInstrumentedModule):\n name = \"elasticsearch_connection\"\n\n instrument_list = [\n (\"elasticsearch.connection.http_urllib3\", \"Urllib3HttpConnection.perform_request\"),\n (\"elasticsearch.connection.http_requests\", \"RequestsHttpConnection.perform_request\"),\n ]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n signature = self.get_signature(args, kwargs)\n context = self.get_context(instance, args, kwargs)\n\n with elasticapm.capture_span(\n signature,\n span_type=\"db\",\n span_subtype=\"elasticsearch\",\n span_action=\"query\",\n extra=context,\n skip_frames=2,\n leaf=True,\n ):\n return wrapped(*args, **kwargs)\n\n\nclass ElasticsearchInstrumentation(AbstractInstrumentedModule):\n name = \"elasticsearch\"\n\n instrument_list = [\n (\"elasticsearch.client\", \"Elasticsearch.delete_by_query\"),\n (\"elasticsearch.client\", \"Elasticsearch.search\"),\n (\"elasticsearch.client\", \"Elasticsearch.count\"),\n (\"elasticsearch.client\", \"Elasticsearch.update\"),\n ]\n\n def __init__(self):\n super(ElasticsearchInstrumentation, self).__init__()\n try:\n from elasticsearch import VERSION\n\n self.version = VERSION[0]\n except ImportError:\n self.version = None\n\n def instrument(self):\n if self.version and not 2 <= self.version < 8:\n logger.debug(\"Instrumenting version %s of Elasticsearch is not supported by Elastic APM\", self.version)\n return\n super(ElasticsearchInstrumentation, self).instrument()\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n kwargs = self.inject_apm_params(method, kwargs)\n return wrapped(*args, **kwargs)\n\n def inject_apm_params(self, method, kwargs):\n params = kwargs.pop(\"params\", {})\n\n # make a copy of params in case the caller reuses them for some reason\n params = params.copy() if params is not None else {}\n\n method_name = method.partition(\".\")[-1]\n\n # store a reference to the non-serialized body so we can use it in the connection layer\n body = kwargs.get(\"body\")\n params[BODY_REF_NAME] = body\n params[API_METHOD_KEY_NAME] = method_name\n\n kwargs[\"params\"] = params\n return kwargs\n", "path": "elasticapm/instrumentation/packages/elasticsearch.py"}]} | 2,966 | 361 |
gh_patches_debug_15375 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-1221 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make `LXMLMixin.lxmlize` use scraper's `session`, rather than a raw `request.get()`
Otherwise it slows down `--fast` scrapes because it doesn't use cached responses.
</issue>
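A rough sketch of the change being requested, assuming (as the accepted patch further down does) that every class mixing this in is a billy scraper exposing a session-backed `get()`:

```python
import lxml.html
import requests


class CachedLxmlizeMixin:
    """Sketch only; mirrors the patch shown below, with details omitted."""

    def lxmlize(self, url):
        # requests.get(url) would bypass the scraper's shared session, so
        # billy's cached responses are never reused and --fast scrapes still
        # hit the network. self.get() (from the Scraper this is mixed into)
        # goes through that session and its cache.
        try:
            response = self.get(url)
        except requests.exceptions.SSLError:
            response = self.get(url, verify=False)
        page = lxml.html.fromstring(response.text)
        page.make_links_absolute(url)
        return page
```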
<code>
[start of openstates/utils/lxmlize.py]
1 import requests
2 import lxml.html
3
4
5 class LXMLMixin(object):
6 """Mixin for adding LXML helper functions to Open States code."""
7
8 def lxmlize(self, url, raise_exceptions=False):
9 """Parses document into an LXML object and makes links absolute.
10
11 Args:
12 url (str): URL of the document to parse.
13 Returns:
14 Element: Document node representing the page.
15 """
16 try:
17 response = requests.get(url)
18 except requests.exceptions.SSLError:
19 self.warning('`self.lxmlize()` failed due to SSL error, trying'\
20 'an unverified `requests.get()`')
21 response = requests.get(url, verify=False)
22
23 if raise_exceptions:
24 response.raise_for_status()
25
26 page = lxml.html.fromstring(response.text)
27 page.make_links_absolute(url)
28
29 return page
30
31 def get_node(self, base_node, xpath_query):
32 """Searches for node in an element tree.
33
34 Attempts to return only the first node found for an xpath query. Meant
35 to cut down on exception handling boilerplate.
36
37 Args:
38 base_node (Element): Document node to begin querying from.
39 xpath_query (str): XPath query to define nodes to search for.
40 Returns:
41 Element: First node found that matches the query.
42 """
43 try:
44 node = base_node.xpath(xpath_query)[0]
45 except IndexError:
46 node = None
47
48 return node
49
50 def get_nodes(self, base_node, xpath_query):
51 """Searches for nodes in an element tree.
52
53 Attempts to return all nodes found for an xpath query. Meant to cut
54 down on exception handling boilerplate.
55
56 Args:
57 base_node (Element): Document node to begin querying from.
58 xpath_query (str): Xpath query to define nodes to search for.
59 Returns:
60 List[Element]: All nodes found that match the query.
61 """
62 return base_node.xpath(xpath_query)
63
[end of openstates/utils/lxmlize.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openstates/utils/lxmlize.py b/openstates/utils/lxmlize.py
--- a/openstates/utils/lxmlize.py
+++ b/openstates/utils/lxmlize.py
@@ -14,11 +14,13 @@
Element: Document node representing the page.
"""
try:
- response = requests.get(url)
+ # This class is always mixed into subclasses of `billy.Scraper`,
+ # which have a `get` method defined.
+ response = self.get(url)
except requests.exceptions.SSLError:
self.warning('`self.lxmlize()` failed due to SSL error, trying'\
- 'an unverified `requests.get()`')
- response = requests.get(url, verify=False)
+ 'an unverified `self.get()` (i.e. `requests.get()`)')
+ response = self.get(url, verify=False)
if raise_exceptions:
response.raise_for_status()
| {"golden_diff": "diff --git a/openstates/utils/lxmlize.py b/openstates/utils/lxmlize.py\n--- a/openstates/utils/lxmlize.py\n+++ b/openstates/utils/lxmlize.py\n@@ -14,11 +14,13 @@\n Element: Document node representing the page.\n \"\"\"\n try:\n- response = requests.get(url)\n+ # This class is always mixed into subclasses of `billy.Scraper`,\n+ # which have a `get` method defined.\n+ response = self.get(url)\n except requests.exceptions.SSLError:\n self.warning('`self.lxmlize()` failed due to SSL error, trying'\\\n- 'an unverified `requests.get()`')\n- response = requests.get(url, verify=False)\n+ 'an unverified `self.get()` (i.e. `requests.get()`)')\n+ response = self.get(url, verify=False)\n \n if raise_exceptions:\n response.raise_for_status()\n", "issue": "Make `LXMLMixin.lxmlize` use scraper's `session`, rather than a raw `request.get()`\nOtherwise it slows down `--fast` scrapes because it doesn't use cached responses.\n", "before_files": [{"content": "import requests\nimport lxml.html\n\n\nclass LXMLMixin(object):\n \"\"\"Mixin for adding LXML helper functions to Open States code.\"\"\"\n\n def lxmlize(self, url, raise_exceptions=False):\n \"\"\"Parses document into an LXML object and makes links absolute.\n\n Args:\n url (str): URL of the document to parse.\n Returns:\n Element: Document node representing the page.\n \"\"\"\n try:\n response = requests.get(url)\n except requests.exceptions.SSLError:\n self.warning('`self.lxmlize()` failed due to SSL error, trying'\\\n 'an unverified `requests.get()`')\n response = requests.get(url, verify=False)\n\n if raise_exceptions:\n response.raise_for_status()\n\n page = lxml.html.fromstring(response.text)\n page.make_links_absolute(url)\n\n return page\n\n def get_node(self, base_node, xpath_query):\n \"\"\"Searches for node in an element tree.\n\n Attempts to return only the first node found for an xpath query. Meant\n to cut down on exception handling boilerplate.\n\n Args:\n base_node (Element): Document node to begin querying from.\n xpath_query (str): XPath query to define nodes to search for.\n Returns:\n Element: First node found that matches the query.\n \"\"\"\n try:\n node = base_node.xpath(xpath_query)[0]\n except IndexError:\n node = None\n\n return node\n\n def get_nodes(self, base_node, xpath_query):\n \"\"\"Searches for nodes in an element tree.\n\n Attempts to return all nodes found for an xpath query. Meant to cut\n down on exception handling boilerplate.\n\n Args:\n base_node (Element): Document node to begin querying from.\n xpath_query (str): Xpath query to define nodes to search for.\n Returns:\n List[Element]: All nodes found that match the query.\n \"\"\"\n return base_node.xpath(xpath_query)\n", "path": "openstates/utils/lxmlize.py"}]} | 1,115 | 205 |
gh_patches_debug_23058 | rasdani/github-patches | git_diff | Azure__azure-cli-extensions-2069 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[AKS] az aks get-upgrades --output table fails on extensions
- If the issue is to do with Azure CLI 2.0 in particular, create an issue here at [Azure/azure-cli](https://github.com/Azure/azure-cli/issues)
### Extension name (the extension in question)
aks-preview
### Description of issue (in as much detail as possible)
Running
```
az aks get-upgrades -g {{RG_NAME}} -n {{CLUSTER_NAME}} --output table
```
fails with
```
preview/azext_aks_preview/_format.py", line 72, in aks_upgrades_table_format
find_preview_versions(result.get('agentPoolProfiles', [{}])[0])
TypeError: 'NoneType' object is not subscriptable
cli.azure.cli.core.util : Table output unavailable. Use the --query option to specify an appropriate query. Use --debug for more info.
Table output unavailable. Use the --query option to specify an appropriate query. Use --debug for more info.
```
The fix is to not use the agentPoolProfile when formatting the table since the field is nil. We need to update the formatter.
The issue only occurs when using the extension together with `-o table`; it works fine on CLI core.
-----
</issue>
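The traceback boils down to a common Python gotcha, sketched here with a made-up payload: `dict.get(key, default)` only returns the default when the key is *absent*, so a key that is present but set to `None` (as `agentPoolProfiles` is in this response) still yields `None`, and `[0]` then fails.

```python
# Illustrative payload only; the real data comes from `az aks get-upgrades`.
result = {
    "controlPlaneProfile": {"kubernetesVersion": "1.18.14", "upgrades": []},
    "agentPoolProfiles": None,  # key exists, but the service returned null
}

profiles = result.get("agentPoolProfiles", [{}])  # -> None; the default is ignored
try:
    profiles[0]
except TypeError as exc:
    print(exc)  # 'NoneType' object is not subscriptable
```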
<code>
[start of src/aks-preview/azext_aks_preview/_format.py]
1 # --------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License. See License.txt in the project root for license information.
4 # --------------------------------------------------------------------------------------------
5
6 from collections import OrderedDict
7 # pylint: disable=import-error
8 from jmespath import compile as compile_jmes, Options
9 # pylint: disable=import-error
10 from jmespath import functions
11
12
13 def aks_agentpool_show_table_format(result):
14 """Format an agent pool as summary results for display with "-o table"."""
15 return [_aks_agentpool_table_format(result)]
16
17
18 def _aks_agentpool_table_format(result):
19 parsed = compile_jmes("""{
20 name: name,
21 osType: osType,
22 kubernetesVersion: kubernetesVersion,
23 vmSize: vmSize,
24 osDiskSizeGB: osDiskSizeGB,
25 count: count,
26 maxPods: maxPods,
27 provisioningState: provisioningState,
28 mode: mode
29 }""")
30 # use ordered dicts so headers are predictable
31 return parsed.search(result, Options(dict_cls=OrderedDict))
32
33
34 def aks_agentpool_list_table_format(results):
35 """Format an agent pool list for display with "-o table"."""
36 return [_aks_agentpool_table_format(r) for r in results]
37
38
39 def aks_list_table_format(results):
40 """"Format a list of managed clusters as summary results for display with "-o table"."""
41 return [_aks_table_format(r) for r in results]
42
43
44 def aks_show_table_format(result):
45 """Format a managed cluster as summary results for display with "-o table"."""
46 return [_aks_table_format(result)]
47
48
49 def _aks_table_format(result):
50 parsed = compile_jmes("""{
51 name: name,
52 location: location,
53 resourceGroup: resourceGroup,
54 kubernetesVersion: kubernetesVersion,
55 provisioningState: provisioningState,
56 fqdn: fqdn
57 }""")
58 # use ordered dicts so headers are predictable
59 return parsed.search(result, Options(dict_cls=OrderedDict))
60
61
62 def aks_upgrades_table_format(result):
63 """Format get-upgrades results as a summary for display with "-o table"."""
64
65 preview = {}
66
67 def find_preview_versions(versions_bag):
68 for upgrade in versions_bag.get('upgrades', []):
69 if upgrade.get('isPreview', False):
70 preview[upgrade['kubernetesVersion']] = True
71 find_preview_versions(result.get('controlPlaneProfile', {}))
72 find_preview_versions(result.get('agentPoolProfiles', [{}])[0])
73
74 # This expression assumes there is one node pool, and that the master and nodes upgrade in lockstep.
75 parsed = compile_jmes("""{
76 name: name,
77 resourceGroup: resourceGroup,
78 masterVersion: controlPlaneProfile.kubernetesVersion || `unknown` | set_preview(@),
79 nodePoolVersion: agentPoolProfiles[0].kubernetesVersion || `unknown` | set_preview(@),
80 upgrades: controlPlaneProfile.upgrades[].kubernetesVersion || [`None available`] | sort_versions(@) | set_preview_array(@) | join(`, `, @)
81 }""")
82 # use ordered dicts so headers are predictable
83 return parsed.search(result, Options(dict_cls=OrderedDict, custom_functions=_custom_functions(preview)))
84
85
86 def aks_versions_table_format(result):
87 """Format get-versions results as a summary for display with "-o table"."""
88
89 # get preview orchestrator version
90 preview = {}
91
92 def find_preview_versions():
93 for orchestrator in result.get('orchestrators', []):
94 if orchestrator.get('isPreview', False):
95 preview[orchestrator['orchestratorVersion']] = True
96 find_preview_versions()
97
98 parsed = compile_jmes("""orchestrators[].{
99 kubernetesVersion: orchestratorVersion | set_preview(@),
100 upgrades: upgrades[].orchestratorVersion || [`None available`] | sort_versions(@) | set_preview_array(@) | join(`, `, @)
101 }""")
102 # use ordered dicts so headers are predictable
103 results = parsed.search(result, Options(dict_cls=OrderedDict, custom_functions=_custom_functions(preview)))
104 return sorted(results, key=lambda x: version_to_tuple(x.get('kubernetesVersion')), reverse=True)
105
106
107 def version_to_tuple(version):
108 """Removes preview suffix"""
109 if version.endswith('(preview)'):
110 version = version[:-len('(preview)')]
111 return tuple(map(int, (version.split('.'))))
112
113
114 def _custom_functions(preview_versions):
115 class CustomFunctions(functions.Functions): # pylint: disable=too-few-public-methods
116
117 @functions.signature({'types': ['array']})
118 def _func_sort_versions(self, versions): # pylint: disable=no-self-use
119 """Custom JMESPath `sort_versions` function that sorts an array of strings as software versions"""
120 try:
121 return sorted(versions, key=version_to_tuple)
122 except (TypeError, ValueError): # if it wasn't sortable, return the input so the pipeline continues
123 return versions
124
125 @functions.signature({'types': ['array']})
126 def _func_set_preview_array(self, versions):
127 """Custom JMESPath `set_preview_array` function that suffixes preview version"""
128 try:
129 for i, _ in enumerate(versions):
130 versions[i] = self._func_set_preview(versions[i])
131 return versions
132 except(TypeError, ValueError):
133 return versions
134
135 @functions.signature({'types': ['string']})
136 def _func_set_preview(self, version): # pylint: disable=no-self-use
137 """Custom JMESPath `set_preview` function that suffixes preview version"""
138 try:
139 if preview_versions.get(version, False):
140 return version + '(preview)'
141 return version
142 except(TypeError, ValueError):
143 return version
144
145 return CustomFunctions()
146
[end of src/aks-preview/azext_aks_preview/_format.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/aks-preview/azext_aks_preview/_format.py b/src/aks-preview/azext_aks_preview/_format.py
--- a/src/aks-preview/azext_aks_preview/_format.py
+++ b/src/aks-preview/azext_aks_preview/_format.py
@@ -69,14 +69,12 @@
if upgrade.get('isPreview', False):
preview[upgrade['kubernetesVersion']] = True
find_preview_versions(result.get('controlPlaneProfile', {}))
- find_preview_versions(result.get('agentPoolProfiles', [{}])[0])
# This expression assumes there is one node pool, and that the master and nodes upgrade in lockstep.
parsed = compile_jmes("""{
name: name,
resourceGroup: resourceGroup,
- masterVersion: controlPlaneProfile.kubernetesVersion || `unknown` | set_preview(@),
- nodePoolVersion: agentPoolProfiles[0].kubernetesVersion || `unknown` | set_preview(@),
+ masterVersion: controlPlaneProfile.kubernetesVersion || `unknown`,
upgrades: controlPlaneProfile.upgrades[].kubernetesVersion || [`None available`] | sort_versions(@) | set_preview_array(@) | join(`, `, @)
}""")
# use ordered dicts so headers are predictable
| {"golden_diff": "diff --git a/src/aks-preview/azext_aks_preview/_format.py b/src/aks-preview/azext_aks_preview/_format.py\n--- a/src/aks-preview/azext_aks_preview/_format.py\n+++ b/src/aks-preview/azext_aks_preview/_format.py\n@@ -69,14 +69,12 @@\n if upgrade.get('isPreview', False):\n preview[upgrade['kubernetesVersion']] = True\n find_preview_versions(result.get('controlPlaneProfile', {}))\n- find_preview_versions(result.get('agentPoolProfiles', [{}])[0])\n \n # This expression assumes there is one node pool, and that the master and nodes upgrade in lockstep.\n parsed = compile_jmes(\"\"\"{\n name: name,\n resourceGroup: resourceGroup,\n- masterVersion: controlPlaneProfile.kubernetesVersion || `unknown` | set_preview(@),\n- nodePoolVersion: agentPoolProfiles[0].kubernetesVersion || `unknown` | set_preview(@),\n+ masterVersion: controlPlaneProfile.kubernetesVersion || `unknown`,\n upgrades: controlPlaneProfile.upgrades[].kubernetesVersion || [`None available`] | sort_versions(@) | set_preview_array(@) | join(`, `, @)\n }\"\"\")\n # use ordered dicts so headers are predictable\n", "issue": "[AKS] az aks get-upgrades --output table fails on extensions\n- If the issue is to do with Azure CLI 2.0 in-particular, create an issue here at [Azure/azure-cli](https://github.com/Azure/azure-cli/issues)\r\n\r\n### Extension name (the extension in question)\r\naks-preview\r\n\r\n### Description of issue (in as much detail as possible)\r\nRunning\r\n```\r\naz aks get-upgrades -g {{RG_NAME}} -n {{CLUSTER_NAME}} --output table\r\n```\r\nfails with\r\n\r\n```\r\npreview/azext_aks_preview/_format.py\", line 72, in aks_upgrades_table_format\r\n find_preview_versions(result.get('agentPoolProfiles', [{}])[0])\r\nTypeError: 'NoneType' object is not subscriptable\r\n\r\ncli.azure.cli.core.util : Table output unavailable. Use the --query option to specify an appropriate query. Use --debug for more info.\r\nTable output unavailable. Use the --query option to specify an appropriate query. Use --debug for more info.\r\n```\r\n\r\nThe fix is to not use the agentPoolProfile when formatting the table since the field is nil. We need to update the formatter.\r\n\r\nIssue is only when using extensions and the -o table. Works fine on CLI core.\r\n-----\r\n\r\n\n", "before_files": [{"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom collections import OrderedDict\n# pylint: disable=import-error\nfrom jmespath import compile as compile_jmes, Options\n# pylint: disable=import-error\nfrom jmespath import functions\n\n\ndef aks_agentpool_show_table_format(result):\n \"\"\"Format an agent pool as summary results for display with \"-o table\".\"\"\"\n return [_aks_agentpool_table_format(result)]\n\n\ndef _aks_agentpool_table_format(result):\n parsed = compile_jmes(\"\"\"{\n name: name,\n osType: osType,\n kubernetesVersion: kubernetesVersion,\n vmSize: vmSize,\n osDiskSizeGB: osDiskSizeGB,\n count: count,\n maxPods: maxPods,\n provisioningState: provisioningState,\n mode: mode\n }\"\"\")\n # use ordered dicts so headers are predictable\n return parsed.search(result, Options(dict_cls=OrderedDict))\n\n\ndef aks_agentpool_list_table_format(results):\n \"\"\"Format an agent pool list for display with \"-o table\".\"\"\"\n return [_aks_agentpool_table_format(r) for r in results]\n\n\ndef aks_list_table_format(results):\n \"\"\"\"Format a list of managed clusters as summary results for display with \"-o table\".\"\"\"\n return [_aks_table_format(r) for r in results]\n\n\ndef aks_show_table_format(result):\n \"\"\"Format a managed cluster as summary results for display with \"-o table\".\"\"\"\n return [_aks_table_format(result)]\n\n\ndef _aks_table_format(result):\n parsed = compile_jmes(\"\"\"{\n name: name,\n location: location,\n resourceGroup: resourceGroup,\n kubernetesVersion: kubernetesVersion,\n provisioningState: provisioningState,\n fqdn: fqdn\n }\"\"\")\n # use ordered dicts so headers are predictable\n return parsed.search(result, Options(dict_cls=OrderedDict))\n\n\ndef aks_upgrades_table_format(result):\n \"\"\"Format get-upgrades results as a summary for display with \"-o table\".\"\"\"\n\n preview = {}\n\n def find_preview_versions(versions_bag):\n for upgrade in versions_bag.get('upgrades', []):\n if upgrade.get('isPreview', False):\n preview[upgrade['kubernetesVersion']] = True\n find_preview_versions(result.get('controlPlaneProfile', {}))\n find_preview_versions(result.get('agentPoolProfiles', [{}])[0])\n\n # This expression assumes there is one node pool, and that the master and nodes upgrade in lockstep.\n parsed = compile_jmes(\"\"\"{\n name: name,\n resourceGroup: resourceGroup,\n masterVersion: controlPlaneProfile.kubernetesVersion || `unknown` | set_preview(@),\n nodePoolVersion: agentPoolProfiles[0].kubernetesVersion || `unknown` | set_preview(@),\n upgrades: controlPlaneProfile.upgrades[].kubernetesVersion || [`None available`] | sort_versions(@) | set_preview_array(@) | join(`, `, @)\n }\"\"\")\n # use ordered dicts so headers are predictable\n return parsed.search(result, Options(dict_cls=OrderedDict, custom_functions=_custom_functions(preview)))\n\n\ndef aks_versions_table_format(result):\n \"\"\"Format get-versions results as a summary for display with \"-o table\".\"\"\"\n\n # get preview orchestrator version\n preview = {}\n\n def find_preview_versions():\n for orchestrator in result.get('orchestrators', []):\n if orchestrator.get('isPreview', False):\n preview[orchestrator['orchestratorVersion']] = True\n find_preview_versions()\n\n parsed = compile_jmes(\"\"\"orchestrators[].{\n kubernetesVersion: orchestratorVersion | set_preview(@),\n upgrades: upgrades[].orchestratorVersion || [`None available`] | sort_versions(@) | 
set_preview_array(@) | join(`, `, @)\n }\"\"\")\n # use ordered dicts so headers are predictable\n results = parsed.search(result, Options(dict_cls=OrderedDict, custom_functions=_custom_functions(preview)))\n return sorted(results, key=lambda x: version_to_tuple(x.get('kubernetesVersion')), reverse=True)\n\n\ndef version_to_tuple(version):\n \"\"\"Removes preview suffix\"\"\"\n if version.endswith('(preview)'):\n version = version[:-len('(preview)')]\n return tuple(map(int, (version.split('.'))))\n\n\ndef _custom_functions(preview_versions):\n class CustomFunctions(functions.Functions): # pylint: disable=too-few-public-methods\n\n @functions.signature({'types': ['array']})\n def _func_sort_versions(self, versions): # pylint: disable=no-self-use\n \"\"\"Custom JMESPath `sort_versions` function that sorts an array of strings as software versions\"\"\"\n try:\n return sorted(versions, key=version_to_tuple)\n except (TypeError, ValueError): # if it wasn't sortable, return the input so the pipeline continues\n return versions\n\n @functions.signature({'types': ['array']})\n def _func_set_preview_array(self, versions):\n \"\"\"Custom JMESPath `set_preview_array` function that suffixes preview version\"\"\"\n try:\n for i, _ in enumerate(versions):\n versions[i] = self._func_set_preview(versions[i])\n return versions\n except(TypeError, ValueError):\n return versions\n\n @functions.signature({'types': ['string']})\n def _func_set_preview(self, version): # pylint: disable=no-self-use\n \"\"\"Custom JMESPath `set_preview` function that suffixes preview version\"\"\"\n try:\n if preview_versions.get(version, False):\n return version + '(preview)'\n return version\n except(TypeError, ValueError):\n return version\n\n return CustomFunctions()\n", "path": "src/aks-preview/azext_aks_preview/_format.py"}]} | 2,390 | 280 |
gh_patches_debug_38544 | rasdani/github-patches | git_diff | biolab__orange3-5824 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
FR: Single-Layer Neural Network possible?
I could be misreading the NN widget help file [1], but is it possible to specify a single-layer NN, i.e. one with no hidden layers? If I enter 0 for the number of hidden-layer neurons, it gives an error (screenshot no. 1), while if I leave it blank, it defaults to 10. As an example, for an NN with 4 inputs and one logistic output, what would I need to enter in the neuron-count box to get it (screenshot no. 2)?
 
[1] Orange Data Mining - Neural Network Widget
https://orangedatamining.com/widget-catalog/model/neuralnetwork/
Env: Windows + Orange 3.31
</issue>
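For context, the accepted change below answers this with a warning rather than an error: a perceptron with zero hidden layers and a logistic output computes `sigmoid(Wx + b)`, which is exactly logistic regression, so the recommended route for the 4-input/1-output case is a plain logistic-regression model. A small sketch on made-up data (the feature values, seed, and decision rule are assumptions):

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 4))            # the "4 inputs"
y = (X[:, 0] - X[:, 2] > 0).astype(int)  # a single binary "output"

# Equivalent to an MLP with no hidden layer and a logistic output unit:
# P(y=1 | x) = sigmoid(x @ w + b)
model = LogisticRegression().fit(X, y)
print(model.coef_, model.intercept_)
print(model.predict_proba(X[:3])[:, 1])  # sigmoid over one linear layer
```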
<code>
[start of Orange/widgets/model/owneuralnetwork.py]
1 from functools import partial
2 import copy
3 import logging
4 import re
5 import concurrent.futures
6 from itertools import chain
7
8 import numpy as np
9
10 from AnyQt.QtWidgets import QFormLayout, QLabel
11 from AnyQt.QtCore import Qt, QThread, QObject
12 from AnyQt.QtCore import pyqtSlot as Slot, pyqtSignal as Signal
13
14 from Orange.data import Table
15 from Orange.modelling import NNLearner
16 from Orange.widgets import gui
17 from Orange.widgets.settings import Setting
18 from Orange.widgets.utils.owlearnerwidget import OWBaseLearner
19
20 from Orange.widgets.utils.concurrent import ThreadExecutor, FutureWatcher
21 from Orange.widgets.utils.widgetpreview import WidgetPreview
22
23
24 class Task(QObject):
25 """
26 A class that will hold the state for an learner evaluation.
27 """
28 done = Signal(object)
29 progressChanged = Signal(float)
30
31 future = None # type: concurrent.futures.Future
32 watcher = None # type: FutureWatcher
33 cancelled = False # type: bool
34
35 def setFuture(self, future):
36 if self.future is not None:
37 raise RuntimeError("future is already set")
38 self.future = future
39 self.watcher = FutureWatcher(future, parent=self)
40 self.watcher.done.connect(self.done)
41
42 def cancel(self):
43 """
44 Cancel the task.
45
46 Set the `cancelled` field to True and block until the future is done.
47 """
48 # set cancelled state
49 self.cancelled = True
50 self.future.cancel()
51 concurrent.futures.wait([self.future])
52
53 def emitProgressUpdate(self, value):
54 self.progressChanged.emit(value)
55
56 def isInterruptionRequested(self):
57 return self.cancelled
58
59
60 class CancelTaskException(BaseException):
61 pass
62
63
64 class OWNNLearner(OWBaseLearner):
65 name = "Neural Network"
66 description = "A multi-layer perceptron (MLP) algorithm with " \
67 "backpropagation."
68 icon = "icons/NN.svg"
69 priority = 90
70 keywords = ["mlp"]
71
72 LEARNER = NNLearner
73
74 activation = ["identity", "logistic", "tanh", "relu"]
75 act_lbl = ["Identity", "Logistic", "tanh", "ReLu"]
76 solver = ["lbfgs", "sgd", "adam"]
77 solv_lbl = ["L-BFGS-B", "SGD", "Adam"]
78
79 learner_name = Setting("Neural Network")
80 hidden_layers_input = Setting("100,")
81 activation_index = Setting(3)
82 solver_index = Setting(2)
83 max_iterations = Setting(200)
84 alpha_index = Setting(0)
85 replicable = Setting(True)
86 settings_version = 1
87
88 alphas = list(chain([x / 10000 for x in range(1, 10)],
89 [x / 1000 for x in range(1, 10)],
90 [x / 100 for x in range(1, 10)],
91 [x / 10 for x in range(1, 10)],
92 range(1, 10),
93 range(10, 100, 5),
94 range(100, 200, 10),
95 range(100, 1001, 50)))
96
97 def add_main_layout(self):
98 # this is part of init, pylint: disable=attribute-defined-outside-init
99 form = QFormLayout()
100 form.setFieldGrowthPolicy(form.AllNonFixedFieldsGrow)
101 form.setLabelAlignment(Qt.AlignLeft)
102 gui.widgetBox(self.controlArea, True, orientation=form)
103 form.addRow(
104 "Neurons in hidden layers:",
105 gui.lineEdit(
106 None, self, "hidden_layers_input",
107 orientation=Qt.Horizontal, callback=self.settings_changed,
108 tooltip="A list of integers defining neurons. Length of list "
109 "defines the number of layers. E.g. 4, 2, 2, 3.",
110 placeholderText="e.g. 10,"))
111 form.addRow(
112 "Activation:",
113 gui.comboBox(
114 None, self, "activation_index", orientation=Qt.Horizontal,
115 label="Activation:", items=[i for i in self.act_lbl],
116 callback=self.settings_changed))
117
118 form.addRow(
119 "Solver:",
120 gui.comboBox(
121 None, self, "solver_index", orientation=Qt.Horizontal,
122 label="Solver:", items=[i for i in self.solv_lbl],
123 callback=self.settings_changed))
124 self.reg_label = QLabel()
125 slider = gui.hSlider(
126 None, self, "alpha_index",
127 minValue=0, maxValue=len(self.alphas) - 1,
128 callback=lambda: (self.set_alpha(), self.settings_changed()),
129 createLabel=False)
130 form.addRow(self.reg_label, slider)
131 self.set_alpha()
132
133 form.addRow(
134 "Maximal number of iterations:",
135 gui.spin(
136 None, self, "max_iterations", 10, 1000000, step=10,
137 label="Max iterations:", orientation=Qt.Horizontal,
138 alignment=Qt.AlignRight, callback=self.settings_changed))
139
140 form.addRow(
141 gui.checkBox(
142 None, self, "replicable", label="Replicable training",
143 callback=self.settings_changed, attribute=Qt.WA_LayoutUsesWidgetRect)
144 )
145
146 def set_alpha(self):
147 # called from init, pylint: disable=attribute-defined-outside-init
148 self.strength_C = self.alphas[self.alpha_index]
149 self.reg_label.setText("Regularization, α={}:".format(self.strength_C))
150
151 @property
152 def alpha(self):
153 return self.alphas[self.alpha_index]
154
155 def setup_layout(self):
156 # this is part of init, pylint: disable=attribute-defined-outside-init
157 super().setup_layout()
158
159 self._task = None # type: Optional[Task]
160 self._executor = ThreadExecutor()
161
162 # just a test cancel button
163 b = gui.button(self.apply_button, self, "Cancel",
164 callback=self.cancel, addToLayout=False)
165 self.apply_button.layout().insertStretch(0, 100)
166 self.apply_button.layout().insertWidget(0, b)
167
168 def create_learner(self):
169 return self.LEARNER(
170 hidden_layer_sizes=self.get_hidden_layers(),
171 activation=self.activation[self.activation_index],
172 solver=self.solver[self.solver_index],
173 alpha=self.alpha,
174 random_state=1 if self.replicable else None,
175 max_iter=self.max_iterations,
176 preprocessors=self.preprocessors)
177
178 def get_learner_parameters(self):
179 return (("Hidden layers", ', '.join(map(str, self.get_hidden_layers()))),
180 ("Activation", self.act_lbl[self.activation_index]),
181 ("Solver", self.solv_lbl[self.solver_index]),
182 ("Alpha", self.alpha),
183 ("Max iterations", self.max_iterations),
184 ("Replicable training", self.replicable))
185
186 def get_hidden_layers(self):
187 layers = tuple(map(int, re.findall(r'\d+', self.hidden_layers_input)))
188 if not layers:
189 layers = (10,)
190 self.hidden_layers_input = "10,"
191 return layers
192
193 def update_model(self):
194 self.show_fitting_failed(None)
195 self.model = None
196 if self.check_data():
197 self.__update()
198 else:
199 self.Outputs.model.send(self.model)
200
201 @Slot(float)
202 def setProgressValue(self, value):
203 assert self.thread() is QThread.currentThread()
204 self.progressBarSet(value)
205
206 def __update(self):
207 if self._task is not None:
208 # First make sure any pending tasks are cancelled.
209 self.cancel()
210 assert self._task is None
211
212 max_iter = self.learner.kwargs["max_iter"]
213
214 # Setup the task state
215 task = Task()
216 lastemitted = 0.
217
218 def callback(iteration):
219 nonlocal task
220 nonlocal lastemitted
221 if task.isInterruptionRequested():
222 raise CancelTaskException()
223 progress = round(iteration / max_iter * 100)
224 if progress != lastemitted:
225 task.emitProgressUpdate(progress)
226 lastemitted = progress
227
228 # copy to set the callback so that the learner output is not modified
229 # (currently we can not pass callbacks to learners __call__)
230 learner = copy.copy(self.learner)
231 learner.callback = callback
232
233 def build_model(data, learner):
234 try:
235 return learner(data)
236 except CancelTaskException:
237 return None
238
239 build_model_func = partial(build_model, self.data, learner)
240
241 task.setFuture(self._executor.submit(build_model_func))
242 task.done.connect(self._task_finished)
243 task.progressChanged.connect(self.setProgressValue)
244
245 # set in setup_layout; pylint: disable=attribute-defined-outside-init
246 self._task = task
247
248 self.progressBarInit()
249 self.setBlocking(True)
250
251 @Slot(concurrent.futures.Future)
252 def _task_finished(self, f):
253 """
254 Parameters
255 ----------
256 f : Future
257 The future instance holding the built model
258 """
259 assert self.thread() is QThread.currentThread()
260 assert self._task is not None
261 assert self._task.future is f
262 assert f.done()
263 self._task.deleteLater()
264 self._task = None # pylint: disable=attribute-defined-outside-init
265 self.setBlocking(False)
266 self.progressBarFinished()
267
268 try:
269 self.model = f.result()
270 except Exception as ex: # pylint: disable=broad-except
271 # Log the exception with a traceback
272 log = logging.getLogger()
273 log.exception(__name__, exc_info=True)
274 self.model = None
275 self.show_fitting_failed(ex)
276 else:
277 self.model.name = self.learner_name
278 self.model.instances = self.data
279 self.model.skl_model.orange_callback = None # remove unpicklable callback
280 self.Outputs.model.send(self.model)
281
282 def cancel(self):
283 """
284 Cancel the current task (if any).
285 """
286 if self._task is not None:
287 self._task.cancel()
288 assert self._task.future.done()
289 # disconnect from the task
290 self._task.done.disconnect(self._task_finished)
291 self._task.progressChanged.disconnect(self.setProgressValue)
292 self._task.deleteLater()
293 self._task = None # pylint: disable=attribute-defined-outside-init
294
295 self.progressBarFinished()
296 self.setBlocking(False)
297
298 def onDeleteWidget(self):
299 self.cancel()
300 super().onDeleteWidget()
301
302 @classmethod
303 def migrate_settings(cls, settings, version):
304 if not version:
305 alpha = settings.pop("alpha", None)
306 if alpha is not None:
307 settings["alpha_index"] = \
308 np.argmin(np.abs(np.array(cls.alphas) - alpha))
309
310
311 if __name__ == "__main__": # pragma: no cover
312 WidgetPreview(OWNNLearner).run(Table("iris"))
313
[end of Orange/widgets/model/owneuralnetwork.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/Orange/widgets/model/owneuralnetwork.py b/Orange/widgets/model/owneuralnetwork.py
--- a/Orange/widgets/model/owneuralnetwork.py
+++ b/Orange/widgets/model/owneuralnetwork.py
@@ -14,6 +14,7 @@
from Orange.data import Table
from Orange.modelling import NNLearner
from Orange.widgets import gui
+from Orange.widgets.widget import Msg
from Orange.widgets.settings import Setting
from Orange.widgets.utils.owlearnerwidget import OWBaseLearner
@@ -81,11 +82,11 @@
activation_index = Setting(3)
solver_index = Setting(2)
max_iterations = Setting(200)
- alpha_index = Setting(0)
+ alpha_index = Setting(1)
replicable = Setting(True)
- settings_version = 1
+ settings_version = 2
- alphas = list(chain([x / 10000 for x in range(1, 10)],
+ alphas = list(chain([0], [x / 10000 for x in range(1, 10)],
[x / 1000 for x in range(1, 10)],
[x / 100 for x in range(1, 10)],
[x / 10 for x in range(1, 10)],
@@ -94,6 +95,11 @@
range(100, 200, 10),
range(100, 1001, 50)))
+ class Warning(OWBaseLearner.Warning):
+ no_layers = Msg("ANN without hidden layers is equivalent to logistic "
+ "regression with worse fitting.\nWe recommend using "
+ "logistic regression.")
+
def add_main_layout(self):
# this is part of init, pylint: disable=attribute-defined-outside-init
form = QFormLayout()
@@ -184,10 +190,10 @@
("Replicable training", self.replicable))
def get_hidden_layers(self):
+ self.Warning.no_layers.clear()
layers = tuple(map(int, re.findall(r'\d+', self.hidden_layers_input)))
if not layers:
- layers = (10,)
- self.hidden_layers_input = "10,"
+ self.Warning.no_layers()
return layers
def update_model(self):
@@ -306,6 +312,8 @@
if alpha is not None:
settings["alpha_index"] = \
np.argmin(np.abs(np.array(cls.alphas) - alpha))
+ elif version < 2:
+ settings["alpha_index"] = settings.get("alpha_index", 0) + 1
if __name__ == "__main__": # pragma: no cover
| {"golden_diff": "diff --git a/Orange/widgets/model/owneuralnetwork.py b/Orange/widgets/model/owneuralnetwork.py\n--- a/Orange/widgets/model/owneuralnetwork.py\n+++ b/Orange/widgets/model/owneuralnetwork.py\n@@ -14,6 +14,7 @@\n from Orange.data import Table\n from Orange.modelling import NNLearner\n from Orange.widgets import gui\n+from Orange.widgets.widget import Msg\n from Orange.widgets.settings import Setting\n from Orange.widgets.utils.owlearnerwidget import OWBaseLearner\n \n@@ -81,11 +82,11 @@\n activation_index = Setting(3)\n solver_index = Setting(2)\n max_iterations = Setting(200)\n- alpha_index = Setting(0)\n+ alpha_index = Setting(1)\n replicable = Setting(True)\n- settings_version = 1\n+ settings_version = 2\n \n- alphas = list(chain([x / 10000 for x in range(1, 10)],\n+ alphas = list(chain([0], [x / 10000 for x in range(1, 10)],\n [x / 1000 for x in range(1, 10)],\n [x / 100 for x in range(1, 10)],\n [x / 10 for x in range(1, 10)],\n@@ -94,6 +95,11 @@\n range(100, 200, 10),\n range(100, 1001, 50)))\n \n+ class Warning(OWBaseLearner.Warning):\n+ no_layers = Msg(\"ANN without hidden layers is equivalent to logistic \"\n+ \"regression with worse fitting.\\nWe recommend using \"\n+ \"logistic regression.\")\n+\n def add_main_layout(self):\n # this is part of init, pylint: disable=attribute-defined-outside-init\n form = QFormLayout()\n@@ -184,10 +190,10 @@\n (\"Replicable training\", self.replicable))\n \n def get_hidden_layers(self):\n+ self.Warning.no_layers.clear()\n layers = tuple(map(int, re.findall(r'\\d+', self.hidden_layers_input)))\n if not layers:\n- layers = (10,)\n- self.hidden_layers_input = \"10,\"\n+ self.Warning.no_layers()\n return layers\n \n def update_model(self):\n@@ -306,6 +312,8 @@\n if alpha is not None:\n settings[\"alpha_index\"] = \\\n np.argmin(np.abs(np.array(cls.alphas) - alpha))\n+ elif version < 2:\n+ settings[\"alpha_index\"] = settings.get(\"alpha_index\", 0) + 1\n \n \n if __name__ == \"__main__\": # pragma: no cover\n", "issue": "FR: Single-Layer Neural Network possible?\nI could be misreading NN widget help file [1], but is it possible to specify a single-layer NN - i.e. no hidden layers? If I enter 0 for the number of hidden layers neurons, it gives an error (screenshot no 1), while if I leave it blank, it defaults to 10. 
As an example, for a NN with 4 inputs, one output with logistic, what would I need to input in the neurons count box to get it (screenshot no 2)?\r\n\r\n \r\n\r\n[1] Orange Data Mining - Neural Network Widget\r\nhttps://orangedatamining.com/widget-catalog/model/neuralnetwork/\r\n\r\nEnv: Windows + Orange 3.31\n", "before_files": [{"content": "from functools import partial\nimport copy\nimport logging\nimport re\nimport concurrent.futures\nfrom itertools import chain\n\nimport numpy as np\n\nfrom AnyQt.QtWidgets import QFormLayout, QLabel\nfrom AnyQt.QtCore import Qt, QThread, QObject\nfrom AnyQt.QtCore import pyqtSlot as Slot, pyqtSignal as Signal\n\nfrom Orange.data import Table\nfrom Orange.modelling import NNLearner\nfrom Orange.widgets import gui\nfrom Orange.widgets.settings import Setting\nfrom Orange.widgets.utils.owlearnerwidget import OWBaseLearner\n\nfrom Orange.widgets.utils.concurrent import ThreadExecutor, FutureWatcher\nfrom Orange.widgets.utils.widgetpreview import WidgetPreview\n\n\nclass Task(QObject):\n \"\"\"\n A class that will hold the state for an learner evaluation.\n \"\"\"\n done = Signal(object)\n progressChanged = Signal(float)\n\n future = None # type: concurrent.futures.Future\n watcher = None # type: FutureWatcher\n cancelled = False # type: bool\n\n def setFuture(self, future):\n if self.future is not None:\n raise RuntimeError(\"future is already set\")\n self.future = future\n self.watcher = FutureWatcher(future, parent=self)\n self.watcher.done.connect(self.done)\n\n def cancel(self):\n \"\"\"\n Cancel the task.\n\n Set the `cancelled` field to True and block until the future is done.\n \"\"\"\n # set cancelled state\n self.cancelled = True\n self.future.cancel()\n concurrent.futures.wait([self.future])\n\n def emitProgressUpdate(self, value):\n self.progressChanged.emit(value)\n\n def isInterruptionRequested(self):\n return self.cancelled\n\n\nclass CancelTaskException(BaseException):\n pass\n\n\nclass OWNNLearner(OWBaseLearner):\n name = \"Neural Network\"\n description = \"A multi-layer perceptron (MLP) algorithm with \" \\\n \"backpropagation.\"\n icon = \"icons/NN.svg\"\n priority = 90\n keywords = [\"mlp\"]\n\n LEARNER = NNLearner\n\n activation = [\"identity\", \"logistic\", \"tanh\", \"relu\"]\n act_lbl = [\"Identity\", \"Logistic\", \"tanh\", \"ReLu\"]\n solver = [\"lbfgs\", \"sgd\", \"adam\"]\n solv_lbl = [\"L-BFGS-B\", \"SGD\", \"Adam\"]\n\n learner_name = Setting(\"Neural Network\")\n hidden_layers_input = Setting(\"100,\")\n activation_index = Setting(3)\n solver_index = Setting(2)\n max_iterations = Setting(200)\n alpha_index = Setting(0)\n replicable = Setting(True)\n settings_version = 1\n\n alphas = list(chain([x / 10000 for x in range(1, 10)],\n [x / 1000 for x in range(1, 10)],\n [x / 100 for x in range(1, 10)],\n [x / 10 for x in range(1, 10)],\n range(1, 10),\n range(10, 100, 5),\n range(100, 200, 10),\n range(100, 1001, 50)))\n\n def add_main_layout(self):\n # this is part of init, pylint: disable=attribute-defined-outside-init\n form = QFormLayout()\n form.setFieldGrowthPolicy(form.AllNonFixedFieldsGrow)\n form.setLabelAlignment(Qt.AlignLeft)\n gui.widgetBox(self.controlArea, True, orientation=form)\n form.addRow(\n \"Neurons in hidden layers:\",\n gui.lineEdit(\n None, self, \"hidden_layers_input\",\n orientation=Qt.Horizontal, callback=self.settings_changed,\n tooltip=\"A list of integers defining neurons. Length of list \"\n \"defines the number of layers. E.g. 4, 2, 2, 3.\",\n placeholderText=\"e.g. 
10,\"))\n form.addRow(\n \"Activation:\",\n gui.comboBox(\n None, self, \"activation_index\", orientation=Qt.Horizontal,\n label=\"Activation:\", items=[i for i in self.act_lbl],\n callback=self.settings_changed))\n\n form.addRow(\n \"Solver:\",\n gui.comboBox(\n None, self, \"solver_index\", orientation=Qt.Horizontal,\n label=\"Solver:\", items=[i for i in self.solv_lbl],\n callback=self.settings_changed))\n self.reg_label = QLabel()\n slider = gui.hSlider(\n None, self, \"alpha_index\",\n minValue=0, maxValue=len(self.alphas) - 1,\n callback=lambda: (self.set_alpha(), self.settings_changed()),\n createLabel=False)\n form.addRow(self.reg_label, slider)\n self.set_alpha()\n\n form.addRow(\n \"Maximal number of iterations:\",\n gui.spin(\n None, self, \"max_iterations\", 10, 1000000, step=10,\n label=\"Max iterations:\", orientation=Qt.Horizontal,\n alignment=Qt.AlignRight, callback=self.settings_changed))\n\n form.addRow(\n gui.checkBox(\n None, self, \"replicable\", label=\"Replicable training\",\n callback=self.settings_changed, attribute=Qt.WA_LayoutUsesWidgetRect)\n )\n\n def set_alpha(self):\n # called from init, pylint: disable=attribute-defined-outside-init\n self.strength_C = self.alphas[self.alpha_index]\n self.reg_label.setText(\"Regularization, \u03b1={}:\".format(self.strength_C))\n\n @property\n def alpha(self):\n return self.alphas[self.alpha_index]\n\n def setup_layout(self):\n # this is part of init, pylint: disable=attribute-defined-outside-init\n super().setup_layout()\n\n self._task = None # type: Optional[Task]\n self._executor = ThreadExecutor()\n\n # just a test cancel button\n b = gui.button(self.apply_button, self, \"Cancel\",\n callback=self.cancel, addToLayout=False)\n self.apply_button.layout().insertStretch(0, 100)\n self.apply_button.layout().insertWidget(0, b)\n\n def create_learner(self):\n return self.LEARNER(\n hidden_layer_sizes=self.get_hidden_layers(),\n activation=self.activation[self.activation_index],\n solver=self.solver[self.solver_index],\n alpha=self.alpha,\n random_state=1 if self.replicable else None,\n max_iter=self.max_iterations,\n preprocessors=self.preprocessors)\n\n def get_learner_parameters(self):\n return ((\"Hidden layers\", ', '.join(map(str, self.get_hidden_layers()))),\n (\"Activation\", self.act_lbl[self.activation_index]),\n (\"Solver\", self.solv_lbl[self.solver_index]),\n (\"Alpha\", self.alpha),\n (\"Max iterations\", self.max_iterations),\n (\"Replicable training\", self.replicable))\n\n def get_hidden_layers(self):\n layers = tuple(map(int, re.findall(r'\\d+', self.hidden_layers_input)))\n if not layers:\n layers = (10,)\n self.hidden_layers_input = \"10,\"\n return layers\n\n def update_model(self):\n self.show_fitting_failed(None)\n self.model = None\n if self.check_data():\n self.__update()\n else:\n self.Outputs.model.send(self.model)\n\n @Slot(float)\n def setProgressValue(self, value):\n assert self.thread() is QThread.currentThread()\n self.progressBarSet(value)\n\n def __update(self):\n if self._task is not None:\n # First make sure any pending tasks are cancelled.\n self.cancel()\n assert self._task is None\n\n max_iter = self.learner.kwargs[\"max_iter\"]\n\n # Setup the task state\n task = Task()\n lastemitted = 0.\n\n def callback(iteration):\n nonlocal task\n nonlocal lastemitted\n if task.isInterruptionRequested():\n raise CancelTaskException()\n progress = round(iteration / max_iter * 100)\n if progress != lastemitted:\n task.emitProgressUpdate(progress)\n lastemitted = progress\n\n # copy to set the callback so 
that the learner output is not modified\n # (currently we can not pass callbacks to learners __call__)\n learner = copy.copy(self.learner)\n learner.callback = callback\n\n def build_model(data, learner):\n try:\n return learner(data)\n except CancelTaskException:\n return None\n\n build_model_func = partial(build_model, self.data, learner)\n\n task.setFuture(self._executor.submit(build_model_func))\n task.done.connect(self._task_finished)\n task.progressChanged.connect(self.setProgressValue)\n\n # set in setup_layout; pylint: disable=attribute-defined-outside-init\n self._task = task\n\n self.progressBarInit()\n self.setBlocking(True)\n\n @Slot(concurrent.futures.Future)\n def _task_finished(self, f):\n \"\"\"\n Parameters\n ----------\n f : Future\n The future instance holding the built model\n \"\"\"\n assert self.thread() is QThread.currentThread()\n assert self._task is not None\n assert self._task.future is f\n assert f.done()\n self._task.deleteLater()\n self._task = None # pylint: disable=attribute-defined-outside-init\n self.setBlocking(False)\n self.progressBarFinished()\n\n try:\n self.model = f.result()\n except Exception as ex: # pylint: disable=broad-except\n # Log the exception with a traceback\n log = logging.getLogger()\n log.exception(__name__, exc_info=True)\n self.model = None\n self.show_fitting_failed(ex)\n else:\n self.model.name = self.learner_name\n self.model.instances = self.data\n self.model.skl_model.orange_callback = None # remove unpicklable callback\n self.Outputs.model.send(self.model)\n\n def cancel(self):\n \"\"\"\n Cancel the current task (if any).\n \"\"\"\n if self._task is not None:\n self._task.cancel()\n assert self._task.future.done()\n # disconnect from the task\n self._task.done.disconnect(self._task_finished)\n self._task.progressChanged.disconnect(self.setProgressValue)\n self._task.deleteLater()\n self._task = None # pylint: disable=attribute-defined-outside-init\n\n self.progressBarFinished()\n self.setBlocking(False)\n\n def onDeleteWidget(self):\n self.cancel()\n super().onDeleteWidget()\n\n @classmethod\n def migrate_settings(cls, settings, version):\n if not version:\n alpha = settings.pop(\"alpha\", None)\n if alpha is not None:\n settings[\"alpha_index\"] = \\\n np.argmin(np.abs(np.array(cls.alphas) - alpha))\n\n\nif __name__ == \"__main__\": # pragma: no cover\n WidgetPreview(OWNNLearner).run(Table(\"iris\"))\n", "path": "Orange/widgets/model/owneuralnetwork.py"}]} | 4,025 | 631 |
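For the Orange `OWNNLearner` record above, the golden diff prepends `0` (no regularization) to the `alphas` choice list and bumps `settings_version` to 2, so an `alpha_index` saved by an older workflow must be shifted by one to keep pointing at the same value. A minimal stand-alone illustration of that migration pattern (the alpha values below are made up; only the shift-by-one logic mirrors the patch):

```python
# Toy illustration of the index-shift settings migration; not the widget's code.
OLD_ALPHAS = [0.0001, 0.0002, 0.0003]   # what version-1 settings indexed into
NEW_ALPHAS = [0] + OLD_ALPHAS           # version 2 prepends "no regularization"


def migrate_settings(settings: dict, version: int) -> None:
    # Mirrors the `elif version < 2:` branch added in the patch above.
    if version is not None and version < 2:
        settings["alpha_index"] = settings.get("alpha_index", 0) + 1


saved = {"alpha_index": 1}              # selected OLD_ALPHAS[1] under version 1
migrate_settings(saved, 1)
assert NEW_ALPHAS[saved["alpha_index"]] == OLD_ALPHAS[1]
```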
gh_patches_debug_25757 | rasdani/github-patches | git_diff | StackStorm__st2-2870 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot create action with a name that collides with a Python library
We created an action called "elasticsearch.py" (with the appropriate yaml). When we try to run it we get an ST2 exception. We are able to reproduce with another action called "test.py". Our hypothesis is that if the action name is the same as a Python library, it fails to run it. This happens after migrating to 1.4.0 from 0.13.2 (it works fine in 0.13.2).
Here's the stacktrace:
```
$ st2 run netflix_samples.elasticsearch app_region=us-east-1
.
id: 57211d9b7279e6326f6b025f
status: failed
parameters:
app_region: us-east-1
result:
exit_code: 1
result: null
stderr: "Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/runners/python_action_wrapper.py", line 164, in <module>
obj.run()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/runners/python_action_wrapper.py", line 102, in run
action = self._get_action_instance()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/runners/python_action_wrapper.py", line 116, in _get_action_instance
actions_cls = action_loader.register_plugin(Action, self._file_path)
File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2common/util/loader.py", line 163, in register_plugin
(plugin_abs_file_path))
Exception: Found no classes in plugin file "/opt/stackstorm/packs/netflix_samples/actions/elasticsearch.py" matching requirements.
"
stdout: ''
```
</issue>
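The hypothesis above (a pack action shadowed by an installed library of the same name) comes down to how the action module is resolved. A minimal sketch of the two strategies, with hypothetical paths and helper names; this is not StackStorm's loader code:

```python
# Illustration only: importing by *name* walks sys.path, so an installed package
# named "elasticsearch" can win over the pack's elasticsearch.py, while loading
# by explicit *path* side-steps sys.path entirely.
import importlib
import importlib.util
import sys


def load_by_name(actions_dir, module_name):
    sys.path.append(actions_dir)
    # site-packages usually precedes the appended pack directory, so the
    # installed client library is found first and the pack file never loads.
    return importlib.import_module(module_name)


def load_by_path(module_name, file_path):
    # Bind the module to an explicit file, independent of sys.path contents
    # (imp.load_source gave the same effect on the Python 2.7 shown in the trace).
    spec = importlib.util.spec_from_file_location(module_name, file_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
```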
<code>
[start of st2common/st2common/util/loader.py]
1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more
2 # contributor license agreements. See the NOTICE file distributed with
3 # this work for additional information regarding copyright ownership.
4 # The ASF licenses this file to You under the Apache License, Version 2.0
5 # (the "License"); you may not use this file except in compliance with
6 # the License. You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import importlib
17 import inspect
18 import json
19 import os
20 import sys
21 import yaml
22
23 from st2common.exceptions.plugins import IncompatiblePluginException
24 from st2common import log as logging
25
26 __all__ = [
27 'register_plugin',
28 'register_plugin_class'
29 ]
30
31
32 LOG = logging.getLogger(__name__)
33 PYTHON_EXTENSIONS = ('.py')
34
35
36 def _register_plugin_path(plugin_dir_abs_path):
37 if not os.path.isdir(plugin_dir_abs_path):
38 raise Exception('Directory "%s" with plugins doesn\'t exist' % (plugin_dir_abs_path))
39
40 for x in sys.path:
41 if plugin_dir_abs_path in (x, x + os.sep):
42 return
43 sys.path.append(plugin_dir_abs_path)
44
45
46 def _get_plugin_module(plugin_file_path):
47 plugin_module = os.path.basename(plugin_file_path)
48 if plugin_module.endswith(PYTHON_EXTENSIONS):
49 plugin_module = plugin_module[:plugin_module.rfind('.py')]
50 else:
51 plugin_module = None
52 return plugin_module
53
54
55 def _get_classes_in_module(module):
56 return [kls for name, kls in inspect.getmembers(module,
57 lambda member: inspect.isclass(member) and member.__module__ == module.__name__)]
58
59
60 def _get_plugin_classes(module_name):
61 return _get_classes_in_module(module_name)
62
63
64 def _get_plugin_methods(plugin_klass):
65 """
66 Return a list of names of all the methods in the provided class.
67
68 Note: Abstract methods which are not implemented are excluded from the
69 list.
70
71 :rtype: ``list`` of ``str``
72 """
73 methods = inspect.getmembers(plugin_klass, inspect.ismethod)
74
75 # Exclude inherited abstract methods from the parent class
76 method_names = []
77 for name, method in methods:
78 method_properties = method.__dict__
79 is_abstract = method_properties.get('__isabstractmethod__', False)
80
81 if is_abstract:
82 continue
83
84 method_names.append(name)
85 return method_names
86
87
88 def _validate_methods(plugin_base_class, plugin_klass):
89 '''
90 XXX: This is hacky but we'd like to validate the methods
91 in plugin_impl at least has all the *abstract* methods in
92 plugin_base_class.
93 '''
94 expected_methods = plugin_base_class.__abstractmethods__
95 plugin_methods = _get_plugin_methods(plugin_klass)
96 for method in expected_methods:
97 if method not in plugin_methods:
98 message = 'Class "%s" doesn\'t implement required "%s" method from the base class'
99 raise IncompatiblePluginException(message % (plugin_klass.__name__, method))
100
101
102 def _register_plugin(plugin_base_class, plugin_impl):
103 _validate_methods(plugin_base_class, plugin_impl)
104 plugin_base_class.register(plugin_impl)
105
106
107 def register_plugin_class(base_class, file_path, class_name):
108 """
109 Retrieve a register plugin class from the provided file.
110
111 This method also validate that the class implements all the abstract methods
112 from the base plugin class.
113
114 :param base_class: Base plugin class.
115 :param base_class: ``class``
116
117 :param file_path: File absolute path to the plugin module file.
118 :type file_path: ``str``
119
120 :param class_name: Class name of a plugin.
121 :type class_name: ``str``
122 """
123 plugin_dir = os.path.dirname(os.path.realpath(file_path))
124 _register_plugin_path(plugin_dir)
125 module_name = _get_plugin_module(file_path)
126
127 if module_name is None:
128 return None
129
130 module = importlib.import_module(module_name)
131 klass = getattr(module, class_name, None)
132
133 if not klass:
134 raise Exception('Plugin file "%s" doesn\'t expose class named "%s"' %
135 (file_path, class_name))
136
137 _register_plugin(base_class, klass)
138 return klass
139
140
141 def register_plugin(plugin_base_class, plugin_abs_file_path):
142 registered_plugins = []
143 plugin_dir = os.path.dirname(os.path.realpath(plugin_abs_file_path))
144 _register_plugin_path(plugin_dir)
145 module_name = _get_plugin_module(plugin_abs_file_path)
146 if module_name is None:
147 return None
148 module = importlib.import_module(module_name)
149 klasses = _get_plugin_classes(module)
150
151 # Try registering classes in plugin file. Some may fail.
152 for klass in klasses:
153 try:
154 _register_plugin(plugin_base_class, klass)
155 registered_plugins.append(klass)
156 except Exception as e:
157 LOG.exception(e)
158 LOG.debug('Skipping class %s as it doesn\'t match specs.', klass)
159 continue
160
161 if len(registered_plugins) == 0:
162 raise Exception('Found no classes in plugin file "%s" matching requirements.' %
163 (plugin_abs_file_path))
164
165 return registered_plugins
166
167
168 ALLOWED_EXTS = ['.json', '.yaml', '.yml']
169 PARSER_FUNCS = {'.json': json.load, '.yml': yaml.safe_load, '.yaml': yaml.safe_load}
170
171
172 def load_meta_file(file_path):
173 if not os.path.isfile(file_path):
174 raise Exception('File "%s" does not exist.' % file_path)
175
176 file_name, file_ext = os.path.splitext(file_path)
177 if file_ext not in ALLOWED_EXTS:
178 raise Exception('Unsupported meta type %s, file %s. Allowed: %s' %
179 (file_ext, file_path, ALLOWED_EXTS))
180
181 with open(file_path, 'r') as f:
182 return PARSER_FUNCS[file_ext](f)
183
[end of st2common/st2common/util/loader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/st2common/st2common/util/loader.py b/st2common/st2common/util/loader.py
--- a/st2common/st2common/util/loader.py
+++ b/st2common/st2common/util/loader.py
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import importlib
+import imp
import inspect
import json
import os
@@ -127,7 +127,7 @@
if module_name is None:
return None
- module = importlib.import_module(module_name)
+ module = imp.load_source(module_name, file_path)
klass = getattr(module, class_name, None)
if not klass:
@@ -142,10 +142,12 @@
registered_plugins = []
plugin_dir = os.path.dirname(os.path.realpath(plugin_abs_file_path))
_register_plugin_path(plugin_dir)
+
module_name = _get_plugin_module(plugin_abs_file_path)
if module_name is None:
return None
- module = importlib.import_module(module_name)
+
+ module = imp.load_source(module_name, plugin_abs_file_path)
klasses = _get_plugin_classes(module)
# Try registering classes in plugin file. Some may fail.
| {"golden_diff": "diff --git a/st2common/st2common/util/loader.py b/st2common/st2common/util/loader.py\n--- a/st2common/st2common/util/loader.py\n+++ b/st2common/st2common/util/loader.py\n@@ -13,7 +13,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-import importlib\n+import imp\n import inspect\n import json\n import os\n@@ -127,7 +127,7 @@\n if module_name is None:\n return None\n \n- module = importlib.import_module(module_name)\n+ module = imp.load_source(module_name, file_path)\n klass = getattr(module, class_name, None)\n \n if not klass:\n@@ -142,10 +142,12 @@\n registered_plugins = []\n plugin_dir = os.path.dirname(os.path.realpath(plugin_abs_file_path))\n _register_plugin_path(plugin_dir)\n+\n module_name = _get_plugin_module(plugin_abs_file_path)\n if module_name is None:\n return None\n- module = importlib.import_module(module_name)\n+\n+ module = imp.load_source(module_name, plugin_abs_file_path)\n klasses = _get_plugin_classes(module)\n \n # Try registering classes in plugin file. Some may fail.\n", "issue": "Cannot create action with a name that collides with a Python library\nWe created an action called \"elasticsearch.py\" (with the appropriate yaml). When we try to run it we get an ST2 exception. We are able to reproduce with another action called \"test.py\". Our hypothesis is that if the action name is the same as a Python library, it fails to run it. This happens after migrating to 1.4.0 from 0.13.2 (it works fine in 0.13.2).\n\nHere's the stacktrace:\n\n```\n$ st2 run netflix_samples.elasticsearch app_region=us-east-1\n.\nid: 57211d9b7279e6326f6b025f\nstatus: failed\nparameters: \n app_region: us-east-1\nresult: \n exit_code: 1\n result: null\n stderr: \"Traceback (most recent call last):\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/runners/python_action_wrapper.py\", line 164, in <module>\n obj.run()\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/runners/python_action_wrapper.py\", line 102, in run\n action = self._get_action_instance()\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2actions/runners/python_action_wrapper.py\", line 116, in _get_action_instance\n actions_cls = action_loader.register_plugin(Action, self._file_path)\n File \"/opt/stackstorm/st2/lib/python2.7/site-packages/st2common/util/loader.py\", line 163, in register_plugin\n (plugin_abs_file_path))\nException: Found no classes in plugin file \"/opt/stackstorm/packs/netflix_samples/actions/elasticsearch.py\" matching requirements.\n\"\n stdout: ''\n```\n\n", "before_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport importlib\nimport inspect\nimport json\nimport os\nimport sys\nimport yaml\n\nfrom st2common.exceptions.plugins import IncompatiblePluginException\nfrom st2common import log as logging\n\n__all__ = [\n 'register_plugin',\n 'register_plugin_class'\n]\n\n\nLOG = logging.getLogger(__name__)\nPYTHON_EXTENSIONS = ('.py')\n\n\ndef _register_plugin_path(plugin_dir_abs_path):\n if not os.path.isdir(plugin_dir_abs_path):\n raise Exception('Directory \"%s\" with plugins doesn\\'t exist' % (plugin_dir_abs_path))\n\n for x in sys.path:\n if plugin_dir_abs_path in (x, x + os.sep):\n return\n sys.path.append(plugin_dir_abs_path)\n\n\ndef _get_plugin_module(plugin_file_path):\n plugin_module = os.path.basename(plugin_file_path)\n if plugin_module.endswith(PYTHON_EXTENSIONS):\n plugin_module = plugin_module[:plugin_module.rfind('.py')]\n else:\n plugin_module = None\n return plugin_module\n\n\ndef _get_classes_in_module(module):\n return [kls for name, kls in inspect.getmembers(module,\n lambda member: inspect.isclass(member) and member.__module__ == module.__name__)]\n\n\ndef _get_plugin_classes(module_name):\n return _get_classes_in_module(module_name)\n\n\ndef _get_plugin_methods(plugin_klass):\n \"\"\"\n Return a list of names of all the methods in the provided class.\n\n Note: Abstract methods which are not implemented are excluded from the\n list.\n\n :rtype: ``list`` of ``str``\n \"\"\"\n methods = inspect.getmembers(plugin_klass, inspect.ismethod)\n\n # Exclude inherited abstract methods from the parent class\n method_names = []\n for name, method in methods:\n method_properties = method.__dict__\n is_abstract = method_properties.get('__isabstractmethod__', False)\n\n if is_abstract:\n continue\n\n method_names.append(name)\n return method_names\n\n\ndef _validate_methods(plugin_base_class, plugin_klass):\n '''\n XXX: This is hacky but we'd like to validate the methods\n in plugin_impl at least has all the *abstract* methods in\n plugin_base_class.\n '''\n expected_methods = plugin_base_class.__abstractmethods__\n plugin_methods = _get_plugin_methods(plugin_klass)\n for method in expected_methods:\n if method not in plugin_methods:\n message = 'Class \"%s\" doesn\\'t implement required \"%s\" method from the base class'\n raise IncompatiblePluginException(message % (plugin_klass.__name__, method))\n\n\ndef _register_plugin(plugin_base_class, plugin_impl):\n _validate_methods(plugin_base_class, plugin_impl)\n plugin_base_class.register(plugin_impl)\n\n\ndef register_plugin_class(base_class, file_path, class_name):\n \"\"\"\n Retrieve a register plugin class from the provided file.\n\n This method also validate that the class implements all the abstract methods\n from the base plugin class.\n\n :param base_class: Base plugin class.\n :param base_class: ``class``\n\n :param file_path: File absolute path to the plugin module file.\n :type file_path: ``str``\n\n :param class_name: Class name of a plugin.\n :type class_name: ``str``\n \"\"\"\n plugin_dir = os.path.dirname(os.path.realpath(file_path))\n _register_plugin_path(plugin_dir)\n module_name = _get_plugin_module(file_path)\n\n 
if module_name is None:\n return None\n\n module = importlib.import_module(module_name)\n klass = getattr(module, class_name, None)\n\n if not klass:\n raise Exception('Plugin file \"%s\" doesn\\'t expose class named \"%s\"' %\n (file_path, class_name))\n\n _register_plugin(base_class, klass)\n return klass\n\n\ndef register_plugin(plugin_base_class, plugin_abs_file_path):\n registered_plugins = []\n plugin_dir = os.path.dirname(os.path.realpath(plugin_abs_file_path))\n _register_plugin_path(plugin_dir)\n module_name = _get_plugin_module(plugin_abs_file_path)\n if module_name is None:\n return None\n module = importlib.import_module(module_name)\n klasses = _get_plugin_classes(module)\n\n # Try registering classes in plugin file. Some may fail.\n for klass in klasses:\n try:\n _register_plugin(plugin_base_class, klass)\n registered_plugins.append(klass)\n except Exception as e:\n LOG.exception(e)\n LOG.debug('Skipping class %s as it doesn\\'t match specs.', klass)\n continue\n\n if len(registered_plugins) == 0:\n raise Exception('Found no classes in plugin file \"%s\" matching requirements.' %\n (plugin_abs_file_path))\n\n return registered_plugins\n\n\nALLOWED_EXTS = ['.json', '.yaml', '.yml']\nPARSER_FUNCS = {'.json': json.load, '.yml': yaml.safe_load, '.yaml': yaml.safe_load}\n\n\ndef load_meta_file(file_path):\n if not os.path.isfile(file_path):\n raise Exception('File \"%s\" does not exist.' % file_path)\n\n file_name, file_ext = os.path.splitext(file_path)\n if file_ext not in ALLOWED_EXTS:\n raise Exception('Unsupported meta type %s, file %s. Allowed: %s' %\n (file_ext, file_path, ALLOWED_EXTS))\n\n with open(file_path, 'r') as f:\n return PARSER_FUNCS[file_ext](f)\n", "path": "st2common/st2common/util/loader.py"}]} | 2,740 | 282 |
gh_patches_debug_13345 | rasdani/github-patches | git_diff | paperless-ngx__paperless-ngx-1645 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] 1.9 OCR output mode "skip_noarchive" is not valid
### Description
With version 1.9 the following env variable throws an error:
```ini
PAPERLESS_OCR_MODE=skip_noarchive
```
### Steps to reproduce
1. set above docker env variable
2. start container
### Webserver logs
```bash
Paperless-ngx docker container starting...
Mapping UID and GID for paperless:paperless to 50014:50014
Creating directory /tmp/paperless
Adjusting permissions of paperless files. This may take a while.
Waiting for PostgreSQL to start...
Waiting for Redis...
Connected to Redis broker.
Apply database migrations...
SystemCheckError: System check identified some issues:
ERRORS:
?: OCR output mode "skip_noarchive" is not valid
```
### Paperless-ngx version
1.9
### Host OS
Ubuntu 22.04 x86_64
### Installation method
Docker - official image
### Browser
_No response_
### Configuration changes
_No response_
### Other
_No response_
</issue>
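The message comes from Django's system-check framework (hence the `SystemCheckError` while migrations run), so it should be reproducible without the full container via `python manage.py check`. A simplified, stdlib-only sketch of the allow-list style check involved; the accepted-mode set below is the one the fix settles on, and keeping that set in sync with what the OCR pipeline actually supports is exactly what drifted here:

```python
# Simplified sketch of the OCR_MODE allow-list check in src/paperless/checks.py
# (the real check wraps failures in django.core.checks.Error objects).
VALID_OCR_MODES = {"force", "skip", "redo", "skip_noarchive"}


def ocr_mode_errors(ocr_mode):
    """Return a list of error messages; empty when the mode is accepted."""
    if ocr_mode not in VALID_OCR_MODES:
        return ['OCR output mode "{}" is not valid'.format(ocr_mode)]
    return []


assert ocr_mode_errors("skip_noarchive") == []   # accepted after the fix
assert ocr_mode_errors("bogus") != []
```

The accompanying patch also corrects the `OCR_CLEAN` allow-list (`clean-final`, not `clean_final`).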
<code>
[start of src/paperless/checks.py]
1 import grp
2 import os
3 import pwd
4 import shutil
5 import stat
6
7 from django.conf import settings
8 from django.core.checks import Error
9 from django.core.checks import register
10 from django.core.checks import Warning
11
12 exists_message = "{} is set but doesn't exist."
13 exists_hint = "Create a directory at {}"
14 writeable_message = "{} is not writeable"
15 writeable_hint = (
16 "Set the permissions of {} to be writeable by the user running the "
17 "Paperless services"
18 )
19
20
21 def path_check(var, directory):
22 messages = []
23 if directory:
24 if not os.path.isdir(directory):
25 messages.append(
26 Error(exists_message.format(var), exists_hint.format(directory)),
27 )
28 else:
29 test_file = os.path.join(
30 directory,
31 f"__paperless_write_test_{os.getpid()}__",
32 )
33 try:
34 with open(test_file, "w"):
35 pass
36 except PermissionError:
37 dir_stat = os.stat(directory)
38 dir_mode = stat.filemode(dir_stat.st_mode)
39 dir_owner = pwd.getpwuid(dir_stat.st_uid).pw_name
40 dir_group = grp.getgrgid(dir_stat.st_gid).gr_name
41 messages.append(
42 Error(
43 writeable_message.format(var),
44 writeable_hint.format(
45 f"\n{dir_mode} {dir_owner} {dir_group} " f"{directory}\n",
46 ),
47 ),
48 )
49 finally:
50 if os.path.isfile(test_file):
51 os.remove(test_file)
52
53 return messages
54
55
56 @register()
57 def paths_check(app_configs, **kwargs):
58 """
59 Check the various paths for existence, readability and writeability
60 """
61
62 return (
63 path_check("PAPERLESS_DATA_DIR", settings.DATA_DIR)
64 + path_check("PAPERLESS_TRASH_DIR", settings.TRASH_DIR)
65 + path_check("PAPERLESS_MEDIA_ROOT", settings.MEDIA_ROOT)
66 + path_check("PAPERLESS_CONSUMPTION_DIR", settings.CONSUMPTION_DIR)
67 )
68
69
70 @register()
71 def binaries_check(app_configs, **kwargs):
72 """
73 Paperless requires the existence of a few binaries, so we do some checks
74 for those here.
75 """
76
77 error = "Paperless can't find {}. Without it, consumption is impossible."
78 hint = "Either it's not in your ${PATH} or it's not installed."
79
80 binaries = (settings.CONVERT_BINARY, "tesseract")
81
82 check_messages = []
83 for binary in binaries:
84 if shutil.which(binary) is None:
85 check_messages.append(Warning(error.format(binary), hint))
86
87 return check_messages
88
89
90 @register()
91 def debug_mode_check(app_configs, **kwargs):
92 if settings.DEBUG:
93 return [
94 Warning(
95 "DEBUG mode is enabled. Disable Debug mode. This is a serious "
96 "security issue, since it puts security overides in place which "
97 "are meant to be only used during development. This "
98 "also means that paperless will tell anyone various "
99 "debugging information when something goes wrong.",
100 ),
101 ]
102 else:
103 return []
104
105
106 @register()
107 def settings_values_check(app_configs, **kwargs):
108 """
109 Validates at least some of the user provided settings
110 """
111
112 def _ocrmypdf_settings_check():
113 """
114 Validates some of the arguments which will be provided to ocrmypdf
115 against the valid options. Use "ocrmypdf --help" to see the valid
116 inputs
117 """
118 msgs = []
119 if settings.OCR_OUTPUT_TYPE not in {
120 "pdfa",
121 "pdf",
122 "pdfa-1",
123 "pdfa-2",
124 "pdfa-3",
125 }:
126 msgs.append(
127 Error(f'OCR output type "{settings.OCR_OUTPUT_TYPE}" is not valid'),
128 )
129
130 if settings.OCR_MODE not in {"force", "skip", "redo_ocr"}:
131 msgs.append(Error(f'OCR output mode "{settings.OCR_MODE}" is not valid'))
132
133 if settings.OCR_CLEAN not in {"clean", "clean_final"}:
134 msgs.append(Error(f'OCR clean mode "{settings.OCR_CLEAN}" is not valid'))
135 return msgs
136
137 def _timezone_validate():
138 """
139 Validates the user provided timezone is a valid timezone
140 """
141 try:
142 import zoneinfo
143 except ImportError: # pragma: nocover
144 import backports.zoneinfo as zoneinfo
145 msgs = []
146 if settings.TIME_ZONE not in zoneinfo.available_timezones():
147 msgs.append(
148 Error(f'Timezone "{settings.TIME_ZONE}" is not a valid timezone'),
149 )
150 return msgs
151
152 return _ocrmypdf_settings_check() + _timezone_validate()
153
[end of src/paperless/checks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/paperless/checks.py b/src/paperless/checks.py
--- a/src/paperless/checks.py
+++ b/src/paperless/checks.py
@@ -127,10 +127,10 @@
Error(f'OCR output type "{settings.OCR_OUTPUT_TYPE}" is not valid'),
)
- if settings.OCR_MODE not in {"force", "skip", "redo_ocr"}:
+ if settings.OCR_MODE not in {"force", "skip", "redo", "skip_noarchive"}:
msgs.append(Error(f'OCR output mode "{settings.OCR_MODE}" is not valid'))
- if settings.OCR_CLEAN not in {"clean", "clean_final"}:
+ if settings.OCR_CLEAN not in {"clean", "clean-final"}:
msgs.append(Error(f'OCR clean mode "{settings.OCR_CLEAN}" is not valid'))
return msgs
| {"golden_diff": "diff --git a/src/paperless/checks.py b/src/paperless/checks.py\n--- a/src/paperless/checks.py\n+++ b/src/paperless/checks.py\n@@ -127,10 +127,10 @@\n Error(f'OCR output type \"{settings.OCR_OUTPUT_TYPE}\" is not valid'),\n )\n \n- if settings.OCR_MODE not in {\"force\", \"skip\", \"redo_ocr\"}:\n+ if settings.OCR_MODE not in {\"force\", \"skip\", \"redo\", \"skip_noarchive\"}:\n msgs.append(Error(f'OCR output mode \"{settings.OCR_MODE}\" is not valid'))\n \n- if settings.OCR_CLEAN not in {\"clean\", \"clean_final\"}:\n+ if settings.OCR_CLEAN not in {\"clean\", \"clean-final\"}:\n msgs.append(Error(f'OCR clean mode \"{settings.OCR_CLEAN}\" is not valid'))\n return msgs\n", "issue": "[BUG] 1.9 OCR output mode \"skip_noarchive\" is not valid\n### Description\n\nWith version 1.9 the following env trow errors:\r\n```ini\r\nPAPERLESS_OCR_MODE=skip_noarchive\r\n```\n\n### Steps to reproduce\n\n1. set above docker env variable\r\n2. start container\n\n### Webserver logs\n\n```bash\nPaperless-ngx docker container starting...\r\nMapping UID and GID for paperless:paperless to 50014:50014\r\nCreating directory /tmp/paperless\r\nAdjusting permissions of paperless files. This may take a while.\r\nWaiting for PostgreSQL to start...\r\nWaiting for Redis...\r\nConnected to Redis broker.\r\nApply database migrations...\r\nSystemCheckError: System check identified some issues:\r\nERRORS:\r\n?: OCR output mode \"skip_noarchive\" is not valid\n```\n\n\n### Paperless-ngx version\n\n1.9\n\n### Host OS\n\nUbuntu 22.04 x86_64\n\n### Installation method\n\nDocker - official image\n\n### Browser\n\n_No response_\n\n### Configuration changes\n\n_No response_\n\n### Other\n\n_No response_\n", "before_files": [{"content": "import grp\nimport os\nimport pwd\nimport shutil\nimport stat\n\nfrom django.conf import settings\nfrom django.core.checks import Error\nfrom django.core.checks import register\nfrom django.core.checks import Warning\n\nexists_message = \"{} is set but doesn't exist.\"\nexists_hint = \"Create a directory at {}\"\nwriteable_message = \"{} is not writeable\"\nwriteable_hint = (\n \"Set the permissions of {} to be writeable by the user running the \"\n \"Paperless services\"\n)\n\n\ndef path_check(var, directory):\n messages = []\n if directory:\n if not os.path.isdir(directory):\n messages.append(\n Error(exists_message.format(var), exists_hint.format(directory)),\n )\n else:\n test_file = os.path.join(\n directory,\n f\"__paperless_write_test_{os.getpid()}__\",\n )\n try:\n with open(test_file, \"w\"):\n pass\n except PermissionError:\n dir_stat = os.stat(directory)\n dir_mode = stat.filemode(dir_stat.st_mode)\n dir_owner = pwd.getpwuid(dir_stat.st_uid).pw_name\n dir_group = grp.getgrgid(dir_stat.st_gid).gr_name\n messages.append(\n Error(\n writeable_message.format(var),\n writeable_hint.format(\n f\"\\n{dir_mode} {dir_owner} {dir_group} \" f\"{directory}\\n\",\n ),\n ),\n )\n finally:\n if os.path.isfile(test_file):\n os.remove(test_file)\n\n return messages\n\n\n@register()\ndef paths_check(app_configs, **kwargs):\n \"\"\"\n Check the various paths for existence, readability and writeability\n \"\"\"\n\n return (\n path_check(\"PAPERLESS_DATA_DIR\", settings.DATA_DIR)\n + path_check(\"PAPERLESS_TRASH_DIR\", settings.TRASH_DIR)\n + path_check(\"PAPERLESS_MEDIA_ROOT\", settings.MEDIA_ROOT)\n + path_check(\"PAPERLESS_CONSUMPTION_DIR\", settings.CONSUMPTION_DIR)\n )\n\n\n@register()\ndef binaries_check(app_configs, **kwargs):\n \"\"\"\n Paperless requires the existence of a few 
binaries, so we do some checks\n for those here.\n \"\"\"\n\n error = \"Paperless can't find {}. Without it, consumption is impossible.\"\n hint = \"Either it's not in your ${PATH} or it's not installed.\"\n\n binaries = (settings.CONVERT_BINARY, \"tesseract\")\n\n check_messages = []\n for binary in binaries:\n if shutil.which(binary) is None:\n check_messages.append(Warning(error.format(binary), hint))\n\n return check_messages\n\n\n@register()\ndef debug_mode_check(app_configs, **kwargs):\n if settings.DEBUG:\n return [\n Warning(\n \"DEBUG mode is enabled. Disable Debug mode. This is a serious \"\n \"security issue, since it puts security overides in place which \"\n \"are meant to be only used during development. This \"\n \"also means that paperless will tell anyone various \"\n \"debugging information when something goes wrong.\",\n ),\n ]\n else:\n return []\n\n\n@register()\ndef settings_values_check(app_configs, **kwargs):\n \"\"\"\n Validates at least some of the user provided settings\n \"\"\"\n\n def _ocrmypdf_settings_check():\n \"\"\"\n Validates some of the arguments which will be provided to ocrmypdf\n against the valid options. Use \"ocrmypdf --help\" to see the valid\n inputs\n \"\"\"\n msgs = []\n if settings.OCR_OUTPUT_TYPE not in {\n \"pdfa\",\n \"pdf\",\n \"pdfa-1\",\n \"pdfa-2\",\n \"pdfa-3\",\n }:\n msgs.append(\n Error(f'OCR output type \"{settings.OCR_OUTPUT_TYPE}\" is not valid'),\n )\n\n if settings.OCR_MODE not in {\"force\", \"skip\", \"redo_ocr\"}:\n msgs.append(Error(f'OCR output mode \"{settings.OCR_MODE}\" is not valid'))\n\n if settings.OCR_CLEAN not in {\"clean\", \"clean_final\"}:\n msgs.append(Error(f'OCR clean mode \"{settings.OCR_CLEAN}\" is not valid'))\n return msgs\n\n def _timezone_validate():\n \"\"\"\n Validates the user provided timezone is a valid timezone\n \"\"\"\n try:\n import zoneinfo\n except ImportError: # pragma: nocover\n import backports.zoneinfo as zoneinfo\n msgs = []\n if settings.TIME_ZONE not in zoneinfo.available_timezones():\n msgs.append(\n Error(f'Timezone \"{settings.TIME_ZONE}\" is not a valid timezone'),\n )\n return msgs\n\n return _ocrmypdf_settings_check() + _timezone_validate()\n", "path": "src/paperless/checks.py"}]} | 2,130 | 199 |
gh_patches_debug_34751 | rasdani/github-patches | git_diff | tournesol-app__tournesol-1330 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[dev-end] load_public_dataset is broken
Hi everyone,
`load_public_dataset` seems to be broken.
Current branch: `main` commit `c9c3b9dab`
How to reproduce:
```
$ ./run-docker-compose.sh download --user-sampling 0.1
⠿ Container tournesol-dev-api Started 25.9s
Waiting for api to be ready...
Waiting for api to be ready...
Waiting for api to be ready...
Waiting for api to be ready...
Waiting for api to be ready...
Waiting for api to be ready...
Created 534 users
Created 5113 video entities
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/pandas/core/indexes/base.py", line 3621, in get_loc
return self._engine.get_loc(casted_key)
File "pandas/_libs/index.pyx", line 136, in pandas._libs.index.IndexEngine.get_loc
File "pandas/_libs/index.pyx", line 163, in pandas._libs.index.IndexEngine.get_loc
File "pandas/_libs/hashtable_class_helper.pxi", line 5198, in pandas._libs.hashtable.PyObjectHashTable.get_item
File "pandas/_libs/hashtable_class_helper.pxi", line 5206, in pandas._libs.hashtable.PyObjectHashTable.get_item
KeyError: 'weight'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/backend/manage.py", line 22, in <module>
main()
File "/backend/manage.py", line 18, in main
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py", line 446, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py", line 440, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/usr/local/lib/python3.9/site-packages/django/core/management/base.py", line 414, in run_from_argv
self.execute(*args, **cmd_options)
File "/usr/local/lib/python3.9/site-packages/django/core/management/base.py", line 460, in execute
output = self.handle(*args, **options)
File "/backend/tournesol/management/commands/load_public_dataset.py", line 113, in handle
weight=values["weight"],
File "/usr/local/lib/python3.9/site-packages/pandas/core/series.py", line 958, in __getitem__
return self._get_value(key)
File "/usr/local/lib/python3.9/site-packages/pandas/core/series.py", line 1069, in _get_value
loc = self.index.get_loc(label)
File "/usr/local/lib/python3.9/site-packages/pandas/core/indexes/base.py", line 3623, in get_loc
raise KeyError(key) from err
```
I will investigate but any help is welcome!
</issue>
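The first traceback shows the real cause: `values["weight"]` in `load_public_dataset.py` raises `KeyError` because the downloaded comparisons CSV no longer ships a `weight` column. A small defensive sketch (not the project's code) of column-tolerant consumption; the `criteria`/`score`/`weight` names are taken from the management command shown below:

```python
# Sketch: only forward optional columns that are actually present in the CSV.
import pandas as pd


def iter_criteria_scores(rows: pd.DataFrame):
    has_weight = "weight" in rows.columns
    for _, values in rows.iterrows():
        kwargs = {"criteria": values["criteria"], "score": values["score"]}
        if has_weight:
            kwargs["weight"] = values["weight"]
        yield kwargs
```

The actual fix (see the patch at the end of this record) simply stops passing `weight`, presumably falling back to the model field's default.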
<code>
[start of backend/tournesol/management/commands/load_public_dataset.py]
1 import concurrent
2 import random
3 from concurrent.futures import ThreadPoolExecutor
4
5 import pandas as pd
6 from django.conf import settings
7 from django.core.management import call_command
8 from django.core.management.base import BaseCommand
9 from django.db import transaction
10
11 from core.models import User
12 from core.models.user import EmailDomain
13 from tournesol.models import Comparison, ComparisonCriteriaScore, ContributorRating, Entity, Poll
14 from tournesol.models.poll import ALGORITHM_MEHESTAN
15
16 PUBLIC_DATASET_URL = "https://api.tournesol.app/exports/comparisons/"
17 RANDOM_SEED = 0
18 SEED_USERS = ["aidjango", "le_science4all", "lpfaucon", "biscuissec", "amatissart"]
19 PRETRUSTED_PROBABILITY = 0.1
20
21 thread_pool = ThreadPoolExecutor(max_workers=10)
22
23
24 class Command(BaseCommand):
25 help = "Generate a new database for dev purposes, derived from the public dataset"
26
27 def add_arguments(self, parser):
28 parser.add_argument("--user-sampling", type=float, default=None)
29
30 def create_user(self, username):
31 is_pretrusted = (
32 username in SEED_USERS
33 ) or random.random() < PRETRUSTED_PROBABILITY # nosec B311
34 email = f"{username}@trusted.example" if is_pretrusted else f"{username}@example.com"
35 user = User.objects.create_user(
36 username=username,
37 email=email,
38 is_staff=username in SEED_USERS
39 )
40 if user.is_staff:
41 # Set a default password for staff accounts (used in e2e tests, etc.)
42 user.set_password("tournesol")
43 user.save()
44 return user
45
46 def create_videos(self, video_ids):
47 videos = {}
48 for video_id in video_ids:
49 videos[video_id] = Entity.create_from_video_id(video_id, fetch_metadata=False)
50 return videos
51
52 def fetch_video_metadata(self, videos):
53 def refresh_metadata(video):
54 video.inner.refresh_metadata(force=True, compute_language=True)
55
56 futures = (thread_pool.submit(refresh_metadata, video) for video in videos.values())
57 for future in concurrent.futures.as_completed(futures):
58 # .result() will reraise any exception occured during refresh
59 future.result()
60
61 def create_test_user(self):
62 User.objects.create_user( # hardcoded password is delibarate # nosec B106
63 username="user1",
64 password="tournesol",
65 email="[email protected]"
66 )
67
68 def handle(self, *args, **options):
69 random.seed(RANDOM_SEED)
70
71 public_dataset = pd.read_csv(PUBLIC_DATASET_URL)
72 nb_comparisons = 0
73
74 with transaction.atomic():
75 poll = Poll.default_poll()
76 poll.algorithm = ALGORITHM_MEHESTAN
77 poll.save()
78
79 usernames = public_dataset.public_username.unique()
80 if options["user_sampling"]:
81 usernames = set(
82 pd.Series(usernames)
83 .sample(frac=options["user_sampling"], random_state=RANDOM_SEED)
84 .values
85 ).union(SEED_USERS)
86 public_dataset = public_dataset[public_dataset.public_username.isin(usernames)]
87
88 EmailDomain.objects.create(
89 domain="@trusted.example",
90 status=EmailDomain.STATUS_ACCEPTED
91 )
92
93 users = {username: self.create_user(username) for username in usernames}
94 print(f"Created {len(users)} users")
95
96 videos = self.create_videos(set(public_dataset.video_a) | set(public_dataset.video_b))
97 print(f"Created {len(videos)} video entities")
98
99 for ((username, video_a, video_b), rows) in public_dataset.groupby(
100 ["public_username", "video_a", "video_b"]
101 ):
102 comparison = Comparison.objects.create(
103 user=users[username],
104 poll=poll,
105 entity_1=videos[video_a],
106 entity_2=videos[video_b],
107 )
108 for _, values in rows.iterrows():
109 ComparisonCriteriaScore.objects.create(
110 comparison=comparison,
111 criteria=values["criteria"],
112 score=values["score"],
113 weight=values["weight"],
114 )
115 nb_comparisons += 1
116 print(f"Created {nb_comparisons} comparisons")
117
118 for entity in Entity.objects.iterator():
119 entity.update_n_ratings()
120
121 self.create_test_user()
122 ContributorRating.objects.update(is_public=True)
123
124 if settings.YOUTUBE_API_KEY:
125 print("Fetching video metadata from Youtube...")
126 self.fetch_video_metadata(videos)
127 print("Done.")
128
129 print("Running ml-train...")
130 call_command("ml_train")
131
[end of backend/tournesol/management/commands/load_public_dataset.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/backend/tournesol/management/commands/load_public_dataset.py b/backend/tournesol/management/commands/load_public_dataset.py
--- a/backend/tournesol/management/commands/load_public_dataset.py
+++ b/backend/tournesol/management/commands/load_public_dataset.py
@@ -26,6 +26,7 @@
def add_arguments(self, parser):
parser.add_argument("--user-sampling", type=float, default=None)
+ parser.add_argument("--comparisons-url", type=str, default=PUBLIC_DATASET_URL)
def create_user(self, username):
is_pretrusted = (
@@ -55,11 +56,13 @@
futures = (thread_pool.submit(refresh_metadata, video) for video in videos.values())
for future in concurrent.futures.as_completed(futures):
- # .result() will reraise any exception occured during refresh
+ # .result() will reraise any exception occurred during refresh
future.result()
+ thread_pool.shutdown()
+
def create_test_user(self):
- User.objects.create_user( # hardcoded password is delibarate # nosec B106
+ User.objects.create_user( # hardcoded password is deliberate # nosec B106
username="user1",
password="tournesol",
email="[email protected]"
@@ -68,7 +71,7 @@
def handle(self, *args, **options):
random.seed(RANDOM_SEED)
- public_dataset = pd.read_csv(PUBLIC_DATASET_URL)
+ public_dataset = pd.read_csv(options["comparisons_url"])
nb_comparisons = 0
with transaction.atomic():
@@ -110,7 +113,6 @@
comparison=comparison,
criteria=values["criteria"],
score=values["score"],
- weight=values["weight"],
)
nb_comparisons += 1
print(f"Created {nb_comparisons} comparisons")
| {"golden_diff": "diff --git a/backend/tournesol/management/commands/load_public_dataset.py b/backend/tournesol/management/commands/load_public_dataset.py\n--- a/backend/tournesol/management/commands/load_public_dataset.py\n+++ b/backend/tournesol/management/commands/load_public_dataset.py\n@@ -26,6 +26,7 @@\n \n def add_arguments(self, parser):\n parser.add_argument(\"--user-sampling\", type=float, default=None)\n+ parser.add_argument(\"--comparisons-url\", type=str, default=PUBLIC_DATASET_URL)\n \n def create_user(self, username):\n is_pretrusted = (\n@@ -55,11 +56,13 @@\n \n futures = (thread_pool.submit(refresh_metadata, video) for video in videos.values())\n for future in concurrent.futures.as_completed(futures):\n- # .result() will reraise any exception occured during refresh\n+ # .result() will reraise any exception occurred during refresh\n future.result()\n \n+ thread_pool.shutdown()\n+\n def create_test_user(self):\n- User.objects.create_user( # hardcoded password is delibarate # nosec B106\n+ User.objects.create_user( # hardcoded password is deliberate # nosec B106\n username=\"user1\",\n password=\"tournesol\",\n email=\"[email protected]\"\n@@ -68,7 +71,7 @@\n def handle(self, *args, **options):\n random.seed(RANDOM_SEED)\n \n- public_dataset = pd.read_csv(PUBLIC_DATASET_URL)\n+ public_dataset = pd.read_csv(options[\"comparisons_url\"])\n nb_comparisons = 0\n \n with transaction.atomic():\n@@ -110,7 +113,6 @@\n comparison=comparison,\n criteria=values[\"criteria\"],\n score=values[\"score\"],\n- weight=values[\"weight\"],\n )\n nb_comparisons += 1\n print(f\"Created {nb_comparisons} comparisons\")\n", "issue": "[dev-end] load_public_dataset is broken\nHi everyone,\r\n\r\n`load_public_dataset` seems to be broken.\r\n\r\nCurrent branch: `main` commit `c9c3b9dab`\r\nHow to reproduce:\r\n\r\n```\r\n$ ./run-docker-compose.sh download --user-sampling 0.1 \r\n \u283f Container tournesol-dev-api Started 25.9s\r\nWaiting for api to be ready...\r\nWaiting for api to be ready...\r\nWaiting for api to be ready...\r\nWaiting for api to be ready...\r\nWaiting for api to be ready...\r\nWaiting for api to be ready...\r\n\r\n Created 534 users\r\nCreated 5113 video entities\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.9/site-packages/pandas/core/indexes/base.py\", line 3621, in get_loc\r\n return self._engine.get_loc(casted_key)\r\n File \"pandas/_libs/index.pyx\", line 136, in pandas._libs.index.IndexEngine.get_loc\r\n File \"pandas/_libs/index.pyx\", line 163, in pandas._libs.index.IndexEngine.get_loc\r\n File \"pandas/_libs/hashtable_class_helper.pxi\", line 5198, in pandas._libs.hashtable.PyObjectHashTable.get_item\r\n File \"pandas/_libs/hashtable_class_helper.pxi\", line 5206, in pandas._libs.hashtable.PyObjectHashTable.get_item\r\nKeyError: 'weight'\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"/backend/manage.py\", line 22, in <module>\r\n main()\r\n File \"/backend/manage.py\", line 18, in main\r\n execute_from_command_line(sys.argv)\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py\", line 446, in execute_from_command_line\r\n utility.execute()\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py\", line 440, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/management/base.py\", line 414, in run_from_argv\r\n 
self.execute(*args, **cmd_options)\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/management/base.py\", line 460, in execute\r\n output = self.handle(*args, **options)\r\n File \"/backend/tournesol/management/commands/load_public_dataset.py\", line 113, in handle\r\n weight=values[\"weight\"],\r\n File \"/usr/local/lib/python3.9/site-packages/pandas/core/series.py\", line 958, in __getitem__\r\n return self._get_value(key)\r\n File \"/usr/local/lib/python3.9/site-packages/pandas/core/series.py\", line 1069, in _get_value\r\n loc = self.index.get_loc(label)\r\n File \"/usr/local/lib/python3.9/site-packages/pandas/core/indexes/base.py\", line 3623, in get_loc\r\n raise KeyError(key) from err\r\n```\r\n\r\nI will investigate but any help is welcome!\n", "before_files": [{"content": "import concurrent\nimport random\nfrom concurrent.futures import ThreadPoolExecutor\n\nimport pandas as pd\nfrom django.conf import settings\nfrom django.core.management import call_command\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\n\nfrom core.models import User\nfrom core.models.user import EmailDomain\nfrom tournesol.models import Comparison, ComparisonCriteriaScore, ContributorRating, Entity, Poll\nfrom tournesol.models.poll import ALGORITHM_MEHESTAN\n\nPUBLIC_DATASET_URL = \"https://api.tournesol.app/exports/comparisons/\"\nRANDOM_SEED = 0\nSEED_USERS = [\"aidjango\", \"le_science4all\", \"lpfaucon\", \"biscuissec\", \"amatissart\"]\nPRETRUSTED_PROBABILITY = 0.1\n\nthread_pool = ThreadPoolExecutor(max_workers=10)\n\n\nclass Command(BaseCommand):\n help = \"Generate a new database for dev purposes, derived from the public dataset\"\n\n def add_arguments(self, parser):\n parser.add_argument(\"--user-sampling\", type=float, default=None)\n\n def create_user(self, username):\n is_pretrusted = (\n username in SEED_USERS\n ) or random.random() < PRETRUSTED_PROBABILITY # nosec B311\n email = f\"{username}@trusted.example\" if is_pretrusted else f\"{username}@example.com\"\n user = User.objects.create_user(\n username=username,\n email=email,\n is_staff=username in SEED_USERS\n )\n if user.is_staff:\n # Set a default password for staff accounts (used in e2e tests, etc.)\n user.set_password(\"tournesol\")\n user.save()\n return user\n\n def create_videos(self, video_ids):\n videos = {}\n for video_id in video_ids:\n videos[video_id] = Entity.create_from_video_id(video_id, fetch_metadata=False)\n return videos\n\n def fetch_video_metadata(self, videos):\n def refresh_metadata(video):\n video.inner.refresh_metadata(force=True, compute_language=True)\n\n futures = (thread_pool.submit(refresh_metadata, video) for video in videos.values())\n for future in concurrent.futures.as_completed(futures):\n # .result() will reraise any exception occured during refresh\n future.result()\n\n def create_test_user(self):\n User.objects.create_user( # hardcoded password is delibarate # nosec B106\n username=\"user1\",\n password=\"tournesol\",\n email=\"[email protected]\"\n )\n\n def handle(self, *args, **options):\n random.seed(RANDOM_SEED)\n\n public_dataset = pd.read_csv(PUBLIC_DATASET_URL)\n nb_comparisons = 0\n\n with transaction.atomic():\n poll = Poll.default_poll()\n poll.algorithm = ALGORITHM_MEHESTAN\n poll.save()\n\n usernames = public_dataset.public_username.unique()\n if options[\"user_sampling\"]:\n usernames = set(\n pd.Series(usernames)\n .sample(frac=options[\"user_sampling\"], random_state=RANDOM_SEED)\n .values\n ).union(SEED_USERS)\n public_dataset = 
public_dataset[public_dataset.public_username.isin(usernames)]\n\n EmailDomain.objects.create(\n domain=\"@trusted.example\",\n status=EmailDomain.STATUS_ACCEPTED\n )\n\n users = {username: self.create_user(username) for username in usernames}\n print(f\"Created {len(users)} users\")\n\n videos = self.create_videos(set(public_dataset.video_a) | set(public_dataset.video_b))\n print(f\"Created {len(videos)} video entities\")\n\n for ((username, video_a, video_b), rows) in public_dataset.groupby(\n [\"public_username\", \"video_a\", \"video_b\"]\n ):\n comparison = Comparison.objects.create(\n user=users[username],\n poll=poll,\n entity_1=videos[video_a],\n entity_2=videos[video_b],\n )\n for _, values in rows.iterrows():\n ComparisonCriteriaScore.objects.create(\n comparison=comparison,\n criteria=values[\"criteria\"],\n score=values[\"score\"],\n weight=values[\"weight\"],\n )\n nb_comparisons += 1\n print(f\"Created {nb_comparisons} comparisons\")\n\n for entity in Entity.objects.iterator():\n entity.update_n_ratings()\n\n self.create_test_user()\n ContributorRating.objects.update(is_public=True)\n\n if settings.YOUTUBE_API_KEY:\n print(\"Fetching video metadata from Youtube...\")\n self.fetch_video_metadata(videos)\n print(\"Done.\")\n\n print(\"Running ml-train...\")\n call_command(\"ml_train\")\n", "path": "backend/tournesol/management/commands/load_public_dataset.py"}]} | 2,543 | 444 |
gh_patches_debug_18925 | rasdani/github-patches | git_diff | mozmeao__snippets-service-1214 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Default to `default` distribution if client is not part of an experiment.
When serving pregen bundles (https://github.com/mozmeao/snippets-service/blob/master/snippets/base/views.py#L63), we want all Firefox clients to get the default bundle for the locale / channel combination, unless they are part of an experiment.
This is because non-Mozilla distributors of Firefox (e.g. Linux distributions) override the distribution field with their identification.
We'll only use the client's distribution if it starts with `experiment-`.
</issue>
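The requested behaviour reduces to a small selection rule for the bundle filename. The sketch below is illustrative only; the accepted patch further down applies the same logic inline in `fetch_snippet_pregen_bundle`, and the helper name `effective_distribution` is hypothetical rather than part of the codebase.

```python
def effective_distribution(distribution: str) -> str:
    """Pick the distribution segment used in the pregen bundle path."""
    distribution = distribution.lower()
    if distribution.startswith('experiment-'):
        # Experiment clients keep their distribution (minus the prefix),
        # mirroring the `distribution[11:]` slice in the accepted patch.
        return distribution[len('experiment-'):]
    # Anything else (e.g. a Linux distro build that overrides the field)
    # falls back to the default bundle.
    return 'default'
```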
<code>
[start of snippets/base/views.py]
1 import json
2 from urllib.parse import urljoin, urlparse
3
4 from distutils.util import strtobool
5 from django.conf import settings
6 from django.contrib.auth.decorators import permission_required
7 from django.core.exceptions import ValidationError
8 from django.core.files.storage import default_storage
9 from django.http import Http404, HttpResponse, HttpResponseBadRequest, HttpResponseRedirect
10 from django.shortcuts import get_object_or_404, render
11 from django.utils.functional import lazy
12 from django.views.decorators.cache import cache_control
13 from django.views.decorators.csrf import csrf_exempt
14 from django.views.decorators.http import require_POST
15 from django.views.generic import TemplateView
16
17 import sentry_sdk
18 from django_filters.views import FilterView
19 from django_statsd.clients import statsd
20
21 from snippets.base import util
22 from snippets.base.bundles import ASRSnippetBundle, SnippetBundle
23 from snippets.base.decorators import access_control
24 from snippets.base.filters import JobFilter
25 from snippets.base.models import CHANNELS, ASRSnippet, Client, Snippet, SnippetTemplate
26 from snippets.base.util import get_object_or_none
27
28
29 def _bundle_timeout():
30 return getattr(settings, 'SNIPPET_BUNDLE_TIMEOUT')
31 SNIPPET_BUNDLE_TIMEOUT = lazy(_bundle_timeout, int)() # noqa
32
33
34 class HomeView(TemplateView):
35 template_name = 'base/home.jinja'
36
37
38 class JobListView(FilterView):
39 filterset_class = JobFilter
40
41 @property
42 def template_name(self):
43 if self.request.GET.get('calendar', 'false') == 'true':
44 return 'base/jobs_list_calendar.jinja'
45
46 return 'base/jobs_list_table.jinja'
47
48
49 def fetch_snippets(request, **kwargs):
50 if settings.USE_PREGEN_BUNDLES and kwargs['startpage_version'] == 6:
51 return fetch_snippet_pregen_bundle(request, **kwargs)
52 return fetch_snippet_bundle(request, **kwargs)
53
54
55 @cache_control(public=True, max_age=settings.SNIPPET_BUNDLE_PREGEN_REDIRECT_TIMEOUT)
56 def fetch_snippet_pregen_bundle(request, **kwargs):
57 statsd.incr('serve.bundle_pregen')
58 client = Client(**kwargs)
59 product = 'Firefox'
60 channel = client.channel.lower()
61 channel = next((item for item in CHANNELS if channel.startswith(item)), None) or 'release'
62 locale = client.locale.lower()
63 distribution = client.distribution.lower()
64 filename = (
65 f'{settings.MEDIA_BUNDLES_PREGEN_ROOT}/{product}/{channel}/'
66 f'{locale}/{distribution}.json'
67 )
68
69 full_url = urljoin(settings.CDN_URL or settings.SITE_URL,
70 urlparse(default_storage.url(filename)).path)
71 # Remove AWS S3 parameters
72 full_url = full_url.split('?')[0]
73
74 return HttpResponseRedirect(full_url)
75
76
77 @cache_control(public=True, max_age=SNIPPET_BUNDLE_TIMEOUT)
78 @access_control(max_age=SNIPPET_BUNDLE_TIMEOUT)
79 def fetch_snippet_bundle(request, **kwargs):
80 """
81 Return one of the following responses:
82 - 200 with empty body when the bundle is empty
83 - 302 to a bundle URL after generating it if not cached.
84 """
85 statsd.incr('serve.snippets')
86
87 client = Client(**kwargs)
88 if client.startpage_version == 6:
89 bundle = ASRSnippetBundle(client)
90 else:
91 bundle = SnippetBundle(client)
92 if bundle.empty:
93 statsd.incr('bundle.empty')
94
95 if client.startpage_version == 6:
96 # Return valid JSON for Activity Stream Router
97 return HttpResponse(status=200, content='{}', content_type='application/json')
98
99 # This is not a 204 because Activity Stream expects content, even if
100 # it's empty.
101 return HttpResponse(status=200, content='')
102 elif bundle.cached:
103 statsd.incr('bundle.cached')
104 else:
105 statsd.incr('bundle.generate')
106 bundle.generate()
107
108 return HttpResponseRedirect(bundle.url)
109
110
111 def preview_asr_snippet(request, uuid):
112 try:
113 snippet = get_object_or_404(ASRSnippet, uuid=uuid)
114 except ValidationError:
115 # Raised when UUID is a badly formed hexadecimal UUID string
116 raise Http404()
117
118 bundle_content = json.dumps({
119 'messages': [snippet.render(preview=True)],
120 })
121 return HttpResponse(bundle_content, content_type='application/json')
122
123
124 @csrf_exempt
125 @permission_required('base.change_snippet')
126 def preview_snippet(request):
127 """
128 Build a snippet using info from the POST parameters, and preview that
129 snippet on a mock about:home page.
130 """
131 try:
132 template_id = int(request.POST.get('template_id', None))
133 except (TypeError, ValueError):
134 return HttpResponseBadRequest()
135
136 template = get_object_or_none(SnippetTemplate, id=template_id)
137 data = request.POST.get('data', None)
138
139 # Validate that data is JSON.
140 try:
141 json.loads(data)
142 except (TypeError, ValueError):
143 data = None
144
145 # If your parameters are wrong, I have no sympathy for you.
146 if data is None or template is None:
147 return HttpResponseBadRequest()
148
149 # Build a snippet that isn't saved so we can render it.
150 snippet = Snippet(template=template, data=data)
151
152 if strtobool(request.POST.get('activity_stream', 'false')):
153 template_name = 'base/preview_as.jinja'
154 preview_client = Client(5, 'Firefox', '57.0', 'default', 'default', 'en-US',
155 'release', 'default', 'default', 'default')
156 else:
157 template_name = 'base/preview.jinja'
158 preview_client = Client(4, 'Firefox', '24.0', 'default', 'default', 'en-US',
159 'release', 'default', 'default', 'default')
160
161 skip_boilerplate = request.POST.get('skip_boilerplate', 'false')
162 skip_boilerplate = strtobool(skip_boilerplate)
163 if skip_boilerplate:
164 template_name = 'base/preview_without_shell.jinja'
165
166 return render(request, template_name, {
167 'snippets_json': json.dumps([snippet.to_dict()]),
168 'client': preview_client,
169 'preview': True,
170 'current_firefox_major_version': util.current_firefox_major_version(),
171 })
172
173
174 def show_snippet(request, snippet_id, uuid=False):
175 preview_client = Client(4, 'Firefox', '24.0', 'default', 'default', 'en-US',
176 'release', 'default', 'default', 'default')
177
178 if uuid:
179 snippet = get_object_or_404(Snippet, uuid=snippet_id)
180 else:
181 snippet = get_object_or_404(Snippet, pk=snippet_id)
182 if not snippet.published and not request.user.is_authenticated:
183 raise Http404()
184
185 template = 'base/preview.jinja'
186 if snippet.on_startpage_5:
187 template = 'base/preview_as.jinja'
188 return render(request, template, {
189 'snippets_json': json.dumps([snippet.to_dict()]),
190 'client': preview_client,
191 'preview': True,
192 'current_firefox_major_version': util.current_firefox_major_version(),
193 })
194
195
196 @csrf_exempt
197 @require_POST
198 def csp_violation_capture(request):
199 try:
200 csp_data = json.loads(request.body)
201 except ValueError:
202 # Cannot decode CSP violation data, ignore
203 return HttpResponseBadRequest('Invalid CSP Report')
204
205 try:
206 blocked_uri = csp_data['csp-report']['blocked-uri']
207 except KeyError:
208 # Incomplete CSP report
209 return HttpResponseBadRequest('Incomplete CSP Report')
210
211 with sentry_sdk.configure_scope() as scope:
212 scope.level = 'info'
213 scope.set_tag('logger', 'csp')
214
215 sentry_sdk.capture_message(
216 message='CSP Violation: {}'.format(blocked_uri))
217
218 return HttpResponse('Captured CSP violation, thanks for reporting.')
219
[end of snippets/base/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/snippets/base/views.py b/snippets/base/views.py
--- a/snippets/base/views.py
+++ b/snippets/base/views.py
@@ -60,7 +60,20 @@
channel = client.channel.lower()
channel = next((item for item in CHANNELS if channel.startswith(item)), None) or 'release'
locale = client.locale.lower()
+
+ # Distribution populated by client's distribution if it starts with
+ # `experiment-`. Otherwise default to `default`.
+ #
+ # This is because non-Mozilla distributors of Firefox (e.g. Linux
+ # Distributions) override the distribution field with their identification.
+ # We want all Firefox clients to get the default bundle for the locale /
+ # channel combination, unless they are part of an experiment.
distribution = client.distribution.lower()
+ if distribution.startswith('experiment-'):
+ distribution = distribution[11:]
+ else:
+ distribution = 'default'
+
filename = (
f'{settings.MEDIA_BUNDLES_PREGEN_ROOT}/{product}/{channel}/'
f'{locale}/{distribution}.json'
| {"golden_diff": "diff --git a/snippets/base/views.py b/snippets/base/views.py\n--- a/snippets/base/views.py\n+++ b/snippets/base/views.py\n@@ -60,7 +60,20 @@\n channel = client.channel.lower()\n channel = next((item for item in CHANNELS if channel.startswith(item)), None) or 'release'\n locale = client.locale.lower()\n+\n+ # Distribution populated by client's distribution if it starts with\n+ # `experiment-`. Otherwise default to `default`.\n+ #\n+ # This is because non-Mozilla distributors of Firefox (e.g. Linux\n+ # Distributions) override the distribution field with their identification.\n+ # We want all Firefox clients to get the default bundle for the locale /\n+ # channel combination, unless they are part of an experiment.\n distribution = client.distribution.lower()\n+ if distribution.startswith('experiment-'):\n+ distribution = distribution[11:]\n+ else:\n+ distribution = 'default'\n+\n filename = (\n f'{settings.MEDIA_BUNDLES_PREGEN_ROOT}/{product}/{channel}/'\n f'{locale}/{distribution}.json'\n", "issue": "Default to `default` distribution if client is not part of an experiment.\nWe want all Firefox clients to get the default bundle for the locale / channel combination, unless they are part of an experiment when using pregen bundles in https://github.com/mozmeao/snippets-service/blob/master/snippets/base/views.py#L63\r\n\r\nThis is because non-Mozilla distributors of Firefox (e.g. Linux Distributions) override the distribution field with their identification. \r\n\r\nWe 'll only use client's distribution if it starts with `experiment-`\n", "before_files": [{"content": "import json\nfrom urllib.parse import urljoin, urlparse\n\nfrom distutils.util import strtobool\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import permission_required\nfrom django.core.exceptions import ValidationError\nfrom django.core.files.storage import default_storage\nfrom django.http import Http404, HttpResponse, HttpResponseBadRequest, HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, render\nfrom django.utils.functional import lazy\nfrom django.views.decorators.cache import cache_control\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_POST\nfrom django.views.generic import TemplateView\n\nimport sentry_sdk\nfrom django_filters.views import FilterView\nfrom django_statsd.clients import statsd\n\nfrom snippets.base import util\nfrom snippets.base.bundles import ASRSnippetBundle, SnippetBundle\nfrom snippets.base.decorators import access_control\nfrom snippets.base.filters import JobFilter\nfrom snippets.base.models import CHANNELS, ASRSnippet, Client, Snippet, SnippetTemplate\nfrom snippets.base.util import get_object_or_none\n\n\ndef _bundle_timeout():\n return getattr(settings, 'SNIPPET_BUNDLE_TIMEOUT')\nSNIPPET_BUNDLE_TIMEOUT = lazy(_bundle_timeout, int)() # noqa\n\n\nclass HomeView(TemplateView):\n template_name = 'base/home.jinja'\n\n\nclass JobListView(FilterView):\n filterset_class = JobFilter\n\n @property\n def template_name(self):\n if self.request.GET.get('calendar', 'false') == 'true':\n return 'base/jobs_list_calendar.jinja'\n\n return 'base/jobs_list_table.jinja'\n\n\ndef fetch_snippets(request, **kwargs):\n if settings.USE_PREGEN_BUNDLES and kwargs['startpage_version'] == 6:\n return fetch_snippet_pregen_bundle(request, **kwargs)\n return fetch_snippet_bundle(request, **kwargs)\n\n\n@cache_control(public=True, max_age=settings.SNIPPET_BUNDLE_PREGEN_REDIRECT_TIMEOUT)\ndef 
fetch_snippet_pregen_bundle(request, **kwargs):\n statsd.incr('serve.bundle_pregen')\n client = Client(**kwargs)\n product = 'Firefox'\n channel = client.channel.lower()\n channel = next((item for item in CHANNELS if channel.startswith(item)), None) or 'release'\n locale = client.locale.lower()\n distribution = client.distribution.lower()\n filename = (\n f'{settings.MEDIA_BUNDLES_PREGEN_ROOT}/{product}/{channel}/'\n f'{locale}/{distribution}.json'\n )\n\n full_url = urljoin(settings.CDN_URL or settings.SITE_URL,\n urlparse(default_storage.url(filename)).path)\n # Remove AWS S3 parameters\n full_url = full_url.split('?')[0]\n\n return HttpResponseRedirect(full_url)\n\n\n@cache_control(public=True, max_age=SNIPPET_BUNDLE_TIMEOUT)\n@access_control(max_age=SNIPPET_BUNDLE_TIMEOUT)\ndef fetch_snippet_bundle(request, **kwargs):\n \"\"\"\n Return one of the following responses:\n - 200 with empty body when the bundle is empty\n - 302 to a bundle URL after generating it if not cached.\n \"\"\"\n statsd.incr('serve.snippets')\n\n client = Client(**kwargs)\n if client.startpage_version == 6:\n bundle = ASRSnippetBundle(client)\n else:\n bundle = SnippetBundle(client)\n if bundle.empty:\n statsd.incr('bundle.empty')\n\n if client.startpage_version == 6:\n # Return valid JSON for Activity Stream Router\n return HttpResponse(status=200, content='{}', content_type='application/json')\n\n # This is not a 204 because Activity Stream expects content, even if\n # it's empty.\n return HttpResponse(status=200, content='')\n elif bundle.cached:\n statsd.incr('bundle.cached')\n else:\n statsd.incr('bundle.generate')\n bundle.generate()\n\n return HttpResponseRedirect(bundle.url)\n\n\ndef preview_asr_snippet(request, uuid):\n try:\n snippet = get_object_or_404(ASRSnippet, uuid=uuid)\n except ValidationError:\n # Raised when UUID is a badly formed hexadecimal UUID string\n raise Http404()\n\n bundle_content = json.dumps({\n 'messages': [snippet.render(preview=True)],\n })\n return HttpResponse(bundle_content, content_type='application/json')\n\n\n@csrf_exempt\n@permission_required('base.change_snippet')\ndef preview_snippet(request):\n \"\"\"\n Build a snippet using info from the POST parameters, and preview that\n snippet on a mock about:home page.\n \"\"\"\n try:\n template_id = int(request.POST.get('template_id', None))\n except (TypeError, ValueError):\n return HttpResponseBadRequest()\n\n template = get_object_or_none(SnippetTemplate, id=template_id)\n data = request.POST.get('data', None)\n\n # Validate that data is JSON.\n try:\n json.loads(data)\n except (TypeError, ValueError):\n data = None\n\n # If your parameters are wrong, I have no sympathy for you.\n if data is None or template is None:\n return HttpResponseBadRequest()\n\n # Build a snippet that isn't saved so we can render it.\n snippet = Snippet(template=template, data=data)\n\n if strtobool(request.POST.get('activity_stream', 'false')):\n template_name = 'base/preview_as.jinja'\n preview_client = Client(5, 'Firefox', '57.0', 'default', 'default', 'en-US',\n 'release', 'default', 'default', 'default')\n else:\n template_name = 'base/preview.jinja'\n preview_client = Client(4, 'Firefox', '24.0', 'default', 'default', 'en-US',\n 'release', 'default', 'default', 'default')\n\n skip_boilerplate = request.POST.get('skip_boilerplate', 'false')\n skip_boilerplate = strtobool(skip_boilerplate)\n if skip_boilerplate:\n template_name = 'base/preview_without_shell.jinja'\n\n return render(request, template_name, {\n 'snippets_json': 
json.dumps([snippet.to_dict()]),\n 'client': preview_client,\n 'preview': True,\n 'current_firefox_major_version': util.current_firefox_major_version(),\n })\n\n\ndef show_snippet(request, snippet_id, uuid=False):\n preview_client = Client(4, 'Firefox', '24.0', 'default', 'default', 'en-US',\n 'release', 'default', 'default', 'default')\n\n if uuid:\n snippet = get_object_or_404(Snippet, uuid=snippet_id)\n else:\n snippet = get_object_or_404(Snippet, pk=snippet_id)\n if not snippet.published and not request.user.is_authenticated:\n raise Http404()\n\n template = 'base/preview.jinja'\n if snippet.on_startpage_5:\n template = 'base/preview_as.jinja'\n return render(request, template, {\n 'snippets_json': json.dumps([snippet.to_dict()]),\n 'client': preview_client,\n 'preview': True,\n 'current_firefox_major_version': util.current_firefox_major_version(),\n })\n\n\n@csrf_exempt\n@require_POST\ndef csp_violation_capture(request):\n try:\n csp_data = json.loads(request.body)\n except ValueError:\n # Cannot decode CSP violation data, ignore\n return HttpResponseBadRequest('Invalid CSP Report')\n\n try:\n blocked_uri = csp_data['csp-report']['blocked-uri']\n except KeyError:\n # Incomplete CSP report\n return HttpResponseBadRequest('Incomplete CSP Report')\n\n with sentry_sdk.configure_scope() as scope:\n scope.level = 'info'\n scope.set_tag('logger', 'csp')\n\n sentry_sdk.capture_message(\n message='CSP Violation: {}'.format(blocked_uri))\n\n return HttpResponse('Captured CSP violation, thanks for reporting.')\n", "path": "snippets/base/views.py"}]} | 2,923 | 243 |
gh_patches_debug_5394 | rasdani/github-patches | git_diff | cupy__cupy-1717 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`cupy.nextafter` is wrong on a=0
`test_nextafter_combination` is failing.
`{'dtype_b': <type 'numpy.float32'>, 'dtype_a': <type 'numpy.bool_'>}`
```
x = array([[1.1754944e-38, 1.0000001e+00, 1.1754944e-38],
[1.0000001e+00, 1.1754944e-38, 1.0000000e+00]], dtype=float32)
y = array([[1.4012985e-45, 1.0000001e+00, 1.4012985e-45],
[1.0000001e+00, 1.4012985e-45, 1.0000000e+00]], dtype=float32)
```
</issue>
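The mismatch above sits exactly at the float32 denormal boundary: 1.4012985e-45 is the smallest subnormal float32, while 1.1754944e-38 is the smallest normal float32. Below is a hypothetical minimal reproduction (assuming, as the values suggest, that `x` is the CuPy result and `y` the NumPy reference; the log does not say so explicitly):

```python
import numpy as np

# NumPy steps from 0 toward 1 into the subnormal range:
print(np.nextafter(np.float32(0), np.float32(1)))  # smallest subnormal (the 1.4012985e-45 above)
print(np.finfo(np.float32).tiny)                   # smallest normal, 1.1754944e-38

# cupy.nextafter(cupy.zeros(1, 'float32'), cupy.ones(1, 'float32')) lands on the
# smallest normal value instead, consistent with denormals being flushed to zero;
# the accepted patch further down adds a docs note attributing this to CuPy
# building with -ftz=true.
```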
<code>
[start of cupy/math/floating.py]
1 from cupy import core
2 from cupy.math import ufunc
3
4
5 signbit = core.create_ufunc(
6 'cupy_signbit',
7 ('e->?', 'f->?', 'd->?'),
8 'out0 = signbit(in0)',
9 doc='''Tests elementwise if the sign bit is set (i.e. less than zero).
10
11 .. seealso:: :data:`numpy.signbit`
12
13 ''')
14
15
16 copysign = ufunc.create_math_ufunc(
17 'copysign', 2, 'cupy_copysign',
18 '''Returns the first argument with the sign bit of the second elementwise.
19
20 .. seealso:: :data:`numpy.copysign`
21
22 ''')
23
24
25 ldexp = core.create_ufunc(
26 'cupy_ldexp',
27 ('ei->e', 'fi->f', 'el->e', 'fl->f', 'di->d', 'dq->d'),
28 'out0 = ldexp(in0, in1)',
29 doc='''Computes ``x1 * 2 ** x2`` elementwise.
30
31 .. seealso:: :data:`numpy.ldexp`
32
33 ''')
34
35
36 frexp = core.create_ufunc(
37 'cupy_frexp',
38 ('e->ei', 'f->fi', 'd->di'),
39 'int nptr; out0 = frexp(in0, &nptr); out1 = nptr',
40 doc='''Decomposes each element to mantissa and two's exponent.
41
42 This ufunc outputs two arrays of the input dtype and the ``int`` dtype.
43
44 .. seealso:: :data:`numpy.frexp`
45
46 ''')
47
48
49 nextafter = ufunc.create_math_ufunc(
50 'nextafter', 2, 'cupy_nextafter',
51 '''Computes the nearest neighbor float values towards the second argument.
52
53 .. seealso:: :data:`numpy.nextafter`
54
55 ''')
56
[end of cupy/math/floating.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cupy/math/floating.py b/cupy/math/floating.py
--- a/cupy/math/floating.py
+++ b/cupy/math/floating.py
@@ -50,6 +50,11 @@
'nextafter', 2, 'cupy_nextafter',
'''Computes the nearest neighbor float values towards the second argument.
+ .. note::
+ For values that are close to zero (or denormal numbers),
+ results of :func:`cupy.nextafter` may be different from those of
+ :func:`numpy.nextafter`, because CuPy sets ``-ftz=true``.
+
.. seealso:: :data:`numpy.nextafter`
''')
| {"golden_diff": "diff --git a/cupy/math/floating.py b/cupy/math/floating.py\n--- a/cupy/math/floating.py\n+++ b/cupy/math/floating.py\n@@ -50,6 +50,11 @@\n 'nextafter', 2, 'cupy_nextafter',\n '''Computes the nearest neighbor float values towards the second argument.\n \n+ .. note::\n+ For values that are close to zero (or denormal numbers),\n+ results of :func:`cupy.nextafter` may be different from those of\n+ :func:`numpy.nextafter`, because CuPy sets ``-ftz=true``.\n+\n .. seealso:: :data:`numpy.nextafter`\n \n ''')\n", "issue": "`cupy.nextafter` is wrong on a=0\n`test_nextafter_combination` is failing.\r\n\r\n`{'dtype_b': <type 'numpy.float32'>, 'dtype_a': <type 'numpy.bool_'>}`\r\n```\r\nx = array([[1.1754944e-38, 1.0000001e+00, 1.1754944e-38],\r\n [1.0000001e+00, 1.1754944e-38, 1.0000000e+00]], dtype=float32)\r\ny = array([[1.4012985e-45, 1.0000001e+00, 1.4012985e-45],\r\n [1.0000001e+00, 1.4012985e-45, 1.0000000e+00]], dtype=float32)\r\n```\n", "before_files": [{"content": "from cupy import core\nfrom cupy.math import ufunc\n\n\nsignbit = core.create_ufunc(\n 'cupy_signbit',\n ('e->?', 'f->?', 'd->?'),\n 'out0 = signbit(in0)',\n doc='''Tests elementwise if the sign bit is set (i.e. less than zero).\n\n .. seealso:: :data:`numpy.signbit`\n\n ''')\n\n\ncopysign = ufunc.create_math_ufunc(\n 'copysign', 2, 'cupy_copysign',\n '''Returns the first argument with the sign bit of the second elementwise.\n\n .. seealso:: :data:`numpy.copysign`\n\n ''')\n\n\nldexp = core.create_ufunc(\n 'cupy_ldexp',\n ('ei->e', 'fi->f', 'el->e', 'fl->f', 'di->d', 'dq->d'),\n 'out0 = ldexp(in0, in1)',\n doc='''Computes ``x1 * 2 ** x2`` elementwise.\n\n .. seealso:: :data:`numpy.ldexp`\n\n ''')\n\n\nfrexp = core.create_ufunc(\n 'cupy_frexp',\n ('e->ei', 'f->fi', 'd->di'),\n 'int nptr; out0 = frexp(in0, &nptr); out1 = nptr',\n doc='''Decomposes each element to mantissa and two's exponent.\n\n This ufunc outputs two arrays of the input dtype and the ``int`` dtype.\n\n .. seealso:: :data:`numpy.frexp`\n\n ''')\n\n\nnextafter = ufunc.create_math_ufunc(\n 'nextafter', 2, 'cupy_nextafter',\n '''Computes the nearest neighbor float values towards the second argument.\n\n .. seealso:: :data:`numpy.nextafter`\n\n ''')\n", "path": "cupy/math/floating.py"}]} | 1,321 | 154 |
gh_patches_debug_34472 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3951 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider dominos_pizza_jp is broken
During the global build at 2021-06-30-14-42-26, spider **dominos_pizza_jp** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/logs/dominos_pizza_jp.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/dominos_pizza_jp.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/dominos_pizza_jp.geojson))
</issue>
<code>
[start of locations/spiders/dominos_pizza_jp.py]
1 # -*- coding: utf-8 -*-
2 import re
3
4 import scrapy
5
6 from locations.items import GeojsonPointItem
7
8
9 class DominosPizzaJPSpider(scrapy.Spider):
10 name = "dominos_pizza_jp"
11 item_attributes = {"brand": "Domino's", "brand_wikidata": "Q839466"}
12 allowed_domains = ["dominos.jp"]
13 start_urls = [
14 "https://www.dominos.jp/sitemap.aspx",
15 ]
16 download_delay = 0.3
17
18 def parse(self, response):
19 response.selector.remove_namespaces()
20 store_urls = response.xpath('//url/loc/text()[contains(.,"/store/")]').extract()
21 for url in store_urls:
22 yield scrapy.Request(url, callback=self.parse_store)
23
24 def parse_store(self, response):
25 ref = re.search(r".+/(.+?)/?(?:\.html|$)", response.url).group(1)
26
27 properties = {
28 "ref": ref,
29 "name": response.xpath(
30 'normalize-space(//div[@class="storetitle"][1]/text())'
31 ).extract_first(),
32 "addr_full": response.xpath(
33 'normalize-space(//span[@id="store-address-info"]/p/a/text())'
34 ).extract_first(),
35 "postcode": re.search(
36 r"([\d-]*)$",
37 response.xpath(
38 'normalize-space(//div[@class="store-details-text"][1]/p/text())'
39 ).extract_first(),
40 ).group(1),
41 "country": "JP",
42 "lat": response.xpath(
43 'normalize-space(//input[@id="store-lat"]/@value)'
44 ).extract_first(),
45 "lon": response.xpath(
46 'normalize-space(//input[@id="store-lon"]/@value)'
47 ).extract_first(),
48 "phone": re.search(
49 r"\s([\d-]*)$",
50 response.xpath('//div[@id="store-tel"]/a/text()').extract_first(),
51 ).group(1),
52 "website": response.url,
53 }
54
55 yield GeojsonPointItem(**properties)
56
[end of locations/spiders/dominos_pizza_jp.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/dominos_pizza_jp.py b/locations/spiders/dominos_pizza_jp.py
--- a/locations/spiders/dominos_pizza_jp.py
+++ b/locations/spiders/dominos_pizza_jp.py
@@ -8,7 +8,11 @@
class DominosPizzaJPSpider(scrapy.Spider):
name = "dominos_pizza_jp"
- item_attributes = {"brand": "Domino's", "brand_wikidata": "Q839466"}
+ item_attributes = {
+ "brand": "Domino's",
+ "brand_wikidata": "Q839466",
+ "country": "JP",
+ }
allowed_domains = ["dominos.jp"]
start_urls = [
"https://www.dominos.jp/sitemap.aspx",
@@ -27,28 +31,24 @@
properties = {
"ref": ref,
"name": response.xpath(
- 'normalize-space(//div[@class="storetitle"][1]/text())'
+ 'normalize-space(//h1[@class="storetitle"][1]/text())'
).extract_first(),
"addr_full": response.xpath(
- 'normalize-space(//span[@id="store-address-info"]/p/a/text())'
+ 'normalize-space(//span[@id="store-address-info"]/p/text()[4])'
).extract_first(),
"postcode": re.search(
r"([\d-]*)$",
response.xpath(
- 'normalize-space(//div[@class="store-details-text"][1]/p/text())'
+ 'normalize-space(//div[@class="store-details-text"]/span/p/text()[2])'
).extract_first(),
).group(1),
- "country": "JP",
"lat": response.xpath(
'normalize-space(//input[@id="store-lat"]/@value)'
).extract_first(),
"lon": response.xpath(
'normalize-space(//input[@id="store-lon"]/@value)'
).extract_first(),
- "phone": re.search(
- r"\s([\d-]*)$",
- response.xpath('//div[@id="store-tel"]/a/text()').extract_first(),
- ).group(1),
+ "phone": response.xpath('//div[@id="store-tel"]/a/text()').extract_first(),
"website": response.url,
}
| {"golden_diff": "diff --git a/locations/spiders/dominos_pizza_jp.py b/locations/spiders/dominos_pizza_jp.py\n--- a/locations/spiders/dominos_pizza_jp.py\n+++ b/locations/spiders/dominos_pizza_jp.py\n@@ -8,7 +8,11 @@\n \n class DominosPizzaJPSpider(scrapy.Spider):\n name = \"dominos_pizza_jp\"\n- item_attributes = {\"brand\": \"Domino's\", \"brand_wikidata\": \"Q839466\"}\n+ item_attributes = {\n+ \"brand\": \"Domino's\",\n+ \"brand_wikidata\": \"Q839466\",\n+ \"country\": \"JP\",\n+ }\n allowed_domains = [\"dominos.jp\"]\n start_urls = [\n \"https://www.dominos.jp/sitemap.aspx\",\n@@ -27,28 +31,24 @@\n properties = {\n \"ref\": ref,\n \"name\": response.xpath(\n- 'normalize-space(//div[@class=\"storetitle\"][1]/text())'\n+ 'normalize-space(//h1[@class=\"storetitle\"][1]/text())'\n ).extract_first(),\n \"addr_full\": response.xpath(\n- 'normalize-space(//span[@id=\"store-address-info\"]/p/a/text())'\n+ 'normalize-space(//span[@id=\"store-address-info\"]/p/text()[4])'\n ).extract_first(),\n \"postcode\": re.search(\n r\"([\\d-]*)$\",\n response.xpath(\n- 'normalize-space(//div[@class=\"store-details-text\"][1]/p/text())'\n+ 'normalize-space(//div[@class=\"store-details-text\"]/span/p/text()[2])'\n ).extract_first(),\n ).group(1),\n- \"country\": \"JP\",\n \"lat\": response.xpath(\n 'normalize-space(//input[@id=\"store-lat\"]/@value)'\n ).extract_first(),\n \"lon\": response.xpath(\n 'normalize-space(//input[@id=\"store-lon\"]/@value)'\n ).extract_first(),\n- \"phone\": re.search(\n- r\"\\s([\\d-]*)$\",\n- response.xpath('//div[@id=\"store-tel\"]/a/text()').extract_first(),\n- ).group(1),\n+ \"phone\": response.xpath('//div[@id=\"store-tel\"]/a/text()').extract_first(),\n \"website\": response.url,\n }\n", "issue": "Spider dominos_pizza_jp is broken\nDuring the global build at 2021-06-30-14-42-26, spider **dominos_pizza_jp** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/logs/dominos_pizza_jp.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/dominos_pizza_jp.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/dominos_pizza_jp.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\n\n\nclass DominosPizzaJPSpider(scrapy.Spider):\n name = \"dominos_pizza_jp\"\n item_attributes = {\"brand\": \"Domino's\", \"brand_wikidata\": \"Q839466\"}\n allowed_domains = [\"dominos.jp\"]\n start_urls = [\n \"https://www.dominos.jp/sitemap.aspx\",\n ]\n download_delay = 0.3\n\n def parse(self, response):\n response.selector.remove_namespaces()\n store_urls = response.xpath('//url/loc/text()[contains(.,\"/store/\")]').extract()\n for url in store_urls:\n yield scrapy.Request(url, callback=self.parse_store)\n\n def parse_store(self, response):\n ref = re.search(r\".+/(.+?)/?(?:\\.html|$)\", response.url).group(1)\n\n properties = {\n \"ref\": ref,\n \"name\": response.xpath(\n 'normalize-space(//div[@class=\"storetitle\"][1]/text())'\n ).extract_first(),\n \"addr_full\": response.xpath(\n 'normalize-space(//span[@id=\"store-address-info\"]/p/a/text())'\n ).extract_first(),\n \"postcode\": re.search(\n r\"([\\d-]*)$\",\n response.xpath(\n 'normalize-space(//div[@class=\"store-details-text\"][1]/p/text())'\n ).extract_first(),\n ).group(1),\n \"country\": \"JP\",\n \"lat\": response.xpath(\n 'normalize-space(//input[@id=\"store-lat\"]/@value)'\n 
).extract_first(),\n \"lon\": response.xpath(\n 'normalize-space(//input[@id=\"store-lon\"]/@value)'\n ).extract_first(),\n \"phone\": re.search(\n r\"\\s([\\d-]*)$\",\n response.xpath('//div[@id=\"store-tel\"]/a/text()').extract_first(),\n ).group(1),\n \"website\": response.url,\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/dominos_pizza_jp.py"}]} | 1,307 | 541 |
gh_patches_debug_11441 | rasdani/github-patches | git_diff | fossasia__open-event-server-5559 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow GET access to ticket attendees for an order, for non-logged-in users
parent issue #5546
</issue>
<code>
[start of app/api/attendees.py]
1 from datetime import datetime
2
3 from flask import Blueprint, request, jsonify, abort, make_response
4 from flask_jwt import current_identity, jwt_required
5 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
6 from flask_rest_jsonapi.exceptions import ObjectNotFound
7 from sqlalchemy.orm.exc import NoResultFound
8
9 from app.api.bootstrap import api
10 from app.api.helpers.db import safe_query, get_count
11 from app.api.helpers.exceptions import (
12 ConflictException,
13 ForbiddenException,
14 UnprocessableEntity,
15 )
16 from app.api.helpers.mail import send_email_to_attendees
17 from app.api.helpers.permission_manager import has_access
18 from app.api.helpers.permissions import jwt_required
19 from app.api.helpers.query import event_query
20 from app.api.helpers.utilities import require_relationship
21 from app.api.schema.attendees import AttendeeSchema
22 from app.models import db
23 from app.models.order import Order
24 from app.models.ticket import Ticket
25 from app.models.ticket_holder import TicketHolder
26 from app.models.user import User
27
28 attendee_misc_routes = Blueprint('attendee_misc', __name__, url_prefix='/v1')
29
30
31 class AttendeeListPost(ResourceList):
32 """
33 List and create Attendees through direct URL
34 """
35
36 def before_post(self, args, kwargs, data):
37 """
38 Before post method to check for required relationship and proper permissions
39 :param args:
40 :param kwargs:
41 :param data:
42 :return:
43 """
44 require_relationship(['ticket', 'event'], data)
45
46 ticket = db.session.query(Ticket).filter_by(
47 id=int(data['ticket']), deleted_at=None
48 ).first()
49 if ticket is None:
50 raise UnprocessableEntity(
51 {'pointer': '/data/relationships/ticket'}, "Invalid Ticket"
52 )
53 if ticket.event_id != int(data['event']):
54 raise UnprocessableEntity(
55 {'pointer': '/data/relationships/ticket'},
56 "Ticket belongs to a different Event"
57 )
58 # Check if the ticket is already sold out or not.
59 if get_count(db.session.query(TicketHolder.id).
60 filter_by(ticket_id=int(data['ticket']), deleted_at=None)) >= ticket.quantity:
61 raise ConflictException(
62 {'pointer': '/data/attributes/ticket_id'},
63 "Ticket already sold out"
64 )
65
66 if 'device_name_checkin' in data and data['device_name_checkin'] is not None:
67 if 'is_checked_in' not in data or not data['is_checked_in']:
68 raise UnprocessableEntity(
69 {'pointer': '/data/attributes/device_name_checkin'},
70 "Attendee needs to be checked in first"
71 )
72 elif 'checkin_times' not in data or data['checkin_times'] is None:
73 raise UnprocessableEntity(
74 {'pointer': '/data/attributes/device_name_checkin'},
75 "Check in Times missing"
76 )
77 elif len(data['checkin_times'].split(",")) != len(data['device_name_checkin'].split(",")):
78 raise UnprocessableEntity(
79 {'pointer': '/data/attributes/device_name_checkin'},
80 "Check in Times missing for the corresponding device name"
81 )
82
83 if 'checkin_times' in data:
84 if 'device_name_checkin' not in data or data['device_name_checkin'] is None:
85 data['device_name_checkin'] = '-'
86
87 decorators = (jwt_required,)
88 methods = ['POST']
89 schema = AttendeeSchema
90 data_layer = {'session': db.session,
91 'model': TicketHolder}
92
93
94 class AttendeeList(ResourceList):
95 """
96 List Attendees
97 """
98 def query(self, view_kwargs):
99 """
100 query method for Attendees List
101 :param view_kwargs:
102 :return:
103 """
104 query_ = self.session.query(TicketHolder)
105
106 if view_kwargs.get('order_identifier'):
107 order = safe_query(self, Order, 'identifier', view_kwargs['order_identifier'], 'order_identifier')
108 if not has_access('is_registrar', event_id=order.event_id) and not has_access('is_user_itself',
109 user_id=order.user_id):
110 raise ForbiddenException({'source': ''}, 'Access Forbidden')
111 query_ = query_.join(Order).filter(Order.id == order.id)
112
113 if view_kwargs.get('ticket_id'):
114 ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id')
115 if not has_access('is_registrar', event_id=ticket.event_id):
116 raise ForbiddenException({'source': ''}, 'Access Forbidden')
117 query_ = query_.join(Ticket).filter(Ticket.id == ticket.id)
118
119 if view_kwargs.get('user_id'):
120 user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')
121 if not has_access('is_user_itself', user_id=user.id):
122 raise ForbiddenException({'source': ''}, 'Access Forbidden')
123 query_ = query_.join(User, User.email == TicketHolder.email).filter(User.id == user.id)
124
125 query_ = event_query(self, query_, view_kwargs, permission='is_registrar')
126 return query_
127
128 view_kwargs = True
129 methods = ['GET', ]
130 schema = AttendeeSchema
131 data_layer = {'session': db.session,
132 'model': TicketHolder,
133 'methods': {
134 'query': query
135 }}
136
137
138 class AttendeeDetail(ResourceDetail):
139 """
140 Attendee detail by id
141 """
142 def before_get_object(self, view_kwargs):
143 """
144 before get object method for attendee detail
145 :param view_kwargs:
146 :return:
147 """
148 attendee = safe_query(self, TicketHolder, 'id', view_kwargs['id'], 'attendee_id')
149 if not has_access('is_registrar_or_user_itself', user_id=current_identity.id, event_id=attendee.event_id):
150 raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')
151
152 def before_delete_object(self, obj, kwargs):
153 """
154 before delete object method for attendee detail
155 :param obj:
156 :param kwargs:
157 :return:
158 """
159 if not has_access('is_registrar', event_id=obj.event_id):
160 raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')
161
162 def before_update_object(self, obj, data, kwargs):
163 """
164 before update object method for attendee detail
165 :param obj:
166 :param data:
167 :param kwargs:
168 :return:
169 """
170 # if not has_access('is_registrar', event_id=obj.event_id):
171 # raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')
172
173 if 'device_name_checkin' in data:
174 if 'checkin_times' not in data or data['checkin_times'] is None:
175 raise UnprocessableEntity(
176 {'pointer': '/data/attributes/device_name_checkin'},
177 "Check in Times missing"
178 )
179
180 if 'is_checked_in' in data and data['is_checked_in']:
181 if 'checkin_times' not in data or data['checkin_times'] is None:
182 raise UnprocessableEntity({'pointer': '/data/attributes/checkin_times'},
183 "Check in time missing while trying to check in attendee")
184 else:
185 if obj.checkin_times and data['checkin_times'] not in obj.checkin_times.split(","):
186 data['checkin_times'] = '{},{}'.format(obj.checkin_times, data['checkin_times'])
187 elif obj.checkin_times and data['checkin_times'] in obj.checkin_times.split(","):
188 raise UnprocessableEntity(
189 {'pointer': '/data/attributes/checkin_times'},
190 "Check in time already present"
191 )
192
193 if 'device_name_checkin' in data and data['device_name_checkin'] is not None:
194 if obj.device_name_checkin is not None:
195 data['device_name_checkin'] = '{},{}'.format(obj.device_name_checkin,
196 data['device_name_checkin'])
197
198 if len(data['checkin_times'].split(",")) != len(data['device_name_checkin'].split(",")):
199 raise UnprocessableEntity(
200 {'pointer': '/data/attributes/device_name_checkin'},
201 "Check in Time missing for the corresponding device name"
202 )
203 else:
204 if obj.device_name_checkin is not None:
205 data['device_name_checkin'] = '{},{}'.format(obj.device_name_checkin, '-')
206 else:
207 data['device_name_checkin'] = '-'
208
209 if 'is_checked_out' in data and data['is_checked_out']:
210 attendee = safe_query(db, TicketHolder, 'id', kwargs['id'], 'attendee_id')
211 if not attendee.is_checked_out:
212 checkout_times = obj.checkout_times.split(',') if obj.checkout_times else []
213 checkout_times.append(str(datetime.utcnow()))
214 data['checkout_times'] = ','.join(checkout_times)
215
216 if 'attendee_notes' in data:
217 if obj.attendee_notes and data['attendee_notes'] not in obj.attendee_notes.split(","):
218 data['attendee_notes'] = '{},{}'.format(obj.attendee_notes, data['attendee_notes'])
219
220 decorators = (jwt_required,)
221 schema = AttendeeSchema
222 data_layer = {'session': db.session,
223 'model': TicketHolder,
224 'methods': {
225 'before_get_object': before_get_object,
226 'before_update_object': before_update_object,
227 'before_delete_object': before_delete_object
228 }}
229
230
231 class AttendeeRelationshipRequired(ResourceRelationship):
232 """
233 Attendee Relationship (Required)
234 """
235 decorators = (jwt_required,)
236 methods = ['GET', 'PATCH']
237 schema = AttendeeSchema
238 data_layer = {'session': db.session,
239 'model': TicketHolder}
240
241
242 class AttendeeRelationshipOptional(ResourceRelationship):
243 """
244 Attendee Relationship(Optional)
245 """
246 decorators = (api.has_permission('is_user_itself', fetch="user_id", fetch_as="id", model=TicketHolder),)
247 schema = AttendeeSchema
248 data_layer = {'session': db.session,
249 'model': TicketHolder}
250
251
252 @attendee_misc_routes.route('/attendees/send-receipt', methods=['POST'])
253 @jwt_required
254 def send_receipt():
255 """
256 Send receipts to attendees related to the provided order.
257 :return:
258 """
259 order_identifier = request.json.get('order-identifier')
260 if order_identifier:
261 try:
262 order = db.session.query(Order).filter_by(identifier=order_identifier).one()
263 except NoResultFound:
264 raise ObjectNotFound({'parameter': '{identifier}'}, "Order not found")
265
266 if (order.user_id != current_identity.id) and (not has_access('is_registrar', event_id=order.event_id)):
267 abort(
268 make_response(jsonify(error="You need to be the event organizer or order buyer to send receipts."), 403)
269 )
270 elif order.status != 'completed':
271 abort(
272 make_response(jsonify(error="Cannot send receipt for an incomplete order"), 409)
273 )
274 else:
275 send_email_to_attendees(order, current_identity.id)
276 return jsonify(message="receipt sent to attendees")
277 else:
278 abort(
279 make_response(jsonify(error="Order identifier missing"), 422)
280 )
281
[end of app/api/attendees.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/api/attendees.py b/app/api/attendees.py
--- a/app/api/attendees.py
+++ b/app/api/attendees.py
@@ -112,8 +112,8 @@
if view_kwargs.get('ticket_id'):
ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id')
- if not has_access('is_registrar', event_id=ticket.event_id):
- raise ForbiddenException({'source': ''}, 'Access Forbidden')
+ # if not has_access('is_registrar', event_id=ticket.event_id):
+ # raise ForbiddenException({'source': ''}, 'Access Forbidden')
query_ = query_.join(Ticket).filter(Ticket.id == ticket.id)
if view_kwargs.get('user_id'):
| {"golden_diff": "diff --git a/app/api/attendees.py b/app/api/attendees.py\n--- a/app/api/attendees.py\n+++ b/app/api/attendees.py\n@@ -112,8 +112,8 @@\n \n if view_kwargs.get('ticket_id'):\n ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id')\n- if not has_access('is_registrar', event_id=ticket.event_id):\n- raise ForbiddenException({'source': ''}, 'Access Forbidden')\n+ # if not has_access('is_registrar', event_id=ticket.event_id):\n+ # raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(Ticket).filter(Ticket.id == ticket.id)\n \n if view_kwargs.get('user_id'):\n", "issue": "Allow GET access to ticket attendees for an order, for non logged in users\nparent issue #5546 \n", "before_files": [{"content": "from datetime import datetime\n\nfrom flask import Blueprint, request, jsonify, abort, make_response\nfrom flask_jwt import current_identity, jwt_required\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom flask_rest_jsonapi.exceptions import ObjectNotFound\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query, get_count\nfrom app.api.helpers.exceptions import (\n ConflictException,\n ForbiddenException,\n UnprocessableEntity,\n)\nfrom app.api.helpers.mail import send_email_to_attendees\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import jwt_required\nfrom app.api.helpers.query import event_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.attendees import AttendeeSchema\nfrom app.models import db\nfrom app.models.order import Order\nfrom app.models.ticket import Ticket\nfrom app.models.ticket_holder import TicketHolder\nfrom app.models.user import User\n\nattendee_misc_routes = Blueprint('attendee_misc', __name__, url_prefix='/v1')\n\n\nclass AttendeeListPost(ResourceList):\n \"\"\"\n List and create Attendees through direct URL\n \"\"\"\n\n def before_post(self, args, kwargs, data):\n \"\"\"\n Before post method to check for required relationship and proper permissions\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['ticket', 'event'], data)\n\n ticket = db.session.query(Ticket).filter_by(\n id=int(data['ticket']), deleted_at=None\n ).first()\n if ticket is None:\n raise UnprocessableEntity(\n {'pointer': '/data/relationships/ticket'}, \"Invalid Ticket\"\n )\n if ticket.event_id != int(data['event']):\n raise UnprocessableEntity(\n {'pointer': '/data/relationships/ticket'},\n \"Ticket belongs to a different Event\"\n )\n # Check if the ticket is already sold out or not.\n if get_count(db.session.query(TicketHolder.id).\n filter_by(ticket_id=int(data['ticket']), deleted_at=None)) >= ticket.quantity:\n raise ConflictException(\n {'pointer': '/data/attributes/ticket_id'},\n \"Ticket already sold out\"\n )\n\n if 'device_name_checkin' in data and data['device_name_checkin'] is not None:\n if 'is_checked_in' not in data or not data['is_checked_in']:\n raise UnprocessableEntity(\n {'pointer': '/data/attributes/device_name_checkin'},\n \"Attendee needs to be checked in first\"\n )\n elif 'checkin_times' not in data or data['checkin_times'] is None:\n raise UnprocessableEntity(\n {'pointer': '/data/attributes/device_name_checkin'},\n \"Check in Times missing\"\n )\n elif len(data['checkin_times'].split(\",\")) != len(data['device_name_checkin'].split(\",\")):\n raise UnprocessableEntity(\n {'pointer': 
'/data/attributes/device_name_checkin'},\n \"Check in Times missing for the corresponding device name\"\n )\n\n if 'checkin_times' in data:\n if 'device_name_checkin' not in data or data['device_name_checkin'] is None:\n data['device_name_checkin'] = '-'\n\n decorators = (jwt_required,)\n methods = ['POST']\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder}\n\n\nclass AttendeeList(ResourceList):\n \"\"\"\n List Attendees\n \"\"\"\n def query(self, view_kwargs):\n \"\"\"\n query method for Attendees List\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(TicketHolder)\n\n if view_kwargs.get('order_identifier'):\n order = safe_query(self, Order, 'identifier', view_kwargs['order_identifier'], 'order_identifier')\n if not has_access('is_registrar', event_id=order.event_id) and not has_access('is_user_itself',\n user_id=order.user_id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(Order).filter(Order.id == order.id)\n\n if view_kwargs.get('ticket_id'):\n ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id')\n if not has_access('is_registrar', event_id=ticket.event_id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(Ticket).filter(Ticket.id == ticket.id)\n\n if view_kwargs.get('user_id'):\n user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')\n if not has_access('is_user_itself', user_id=user.id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(User, User.email == TicketHolder.email).filter(User.id == user.id)\n\n query_ = event_query(self, query_, view_kwargs, permission='is_registrar')\n return query_\n\n view_kwargs = True\n methods = ['GET', ]\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder,\n 'methods': {\n 'query': query\n }}\n\n\nclass AttendeeDetail(ResourceDetail):\n \"\"\"\n Attendee detail by id\n \"\"\"\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get object method for attendee detail\n :param view_kwargs:\n :return:\n \"\"\"\n attendee = safe_query(self, TicketHolder, 'id', view_kwargs['id'], 'attendee_id')\n if not has_access('is_registrar_or_user_itself', user_id=current_identity.id, event_id=attendee.event_id):\n raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')\n\n def before_delete_object(self, obj, kwargs):\n \"\"\"\n before delete object method for attendee detail\n :param obj:\n :param kwargs:\n :return:\n \"\"\"\n if not has_access('is_registrar', event_id=obj.event_id):\n raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')\n\n def before_update_object(self, obj, data, kwargs):\n \"\"\"\n before update object method for attendee detail\n :param obj:\n :param data:\n :param kwargs:\n :return:\n \"\"\"\n# if not has_access('is_registrar', event_id=obj.event_id):\n# raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')\n\n if 'device_name_checkin' in data:\n if 'checkin_times' not in data or data['checkin_times'] is None:\n raise UnprocessableEntity(\n {'pointer': '/data/attributes/device_name_checkin'},\n \"Check in Times missing\"\n )\n\n if 'is_checked_in' in data and data['is_checked_in']:\n if 'checkin_times' not in data or data['checkin_times'] is None:\n raise UnprocessableEntity({'pointer': '/data/attributes/checkin_times'},\n \"Check in time missing while trying to check in attendee\")\n 
else:\n if obj.checkin_times and data['checkin_times'] not in obj.checkin_times.split(\",\"):\n data['checkin_times'] = '{},{}'.format(obj.checkin_times, data['checkin_times'])\n elif obj.checkin_times and data['checkin_times'] in obj.checkin_times.split(\",\"):\n raise UnprocessableEntity(\n {'pointer': '/data/attributes/checkin_times'},\n \"Check in time already present\"\n )\n\n if 'device_name_checkin' in data and data['device_name_checkin'] is not None:\n if obj.device_name_checkin is not None:\n data['device_name_checkin'] = '{},{}'.format(obj.device_name_checkin,\n data['device_name_checkin'])\n\n if len(data['checkin_times'].split(\",\")) != len(data['device_name_checkin'].split(\",\")):\n raise UnprocessableEntity(\n {'pointer': '/data/attributes/device_name_checkin'},\n \"Check in Time missing for the corresponding device name\"\n )\n else:\n if obj.device_name_checkin is not None:\n data['device_name_checkin'] = '{},{}'.format(obj.device_name_checkin, '-')\n else:\n data['device_name_checkin'] = '-'\n\n if 'is_checked_out' in data and data['is_checked_out']:\n attendee = safe_query(db, TicketHolder, 'id', kwargs['id'], 'attendee_id')\n if not attendee.is_checked_out:\n checkout_times = obj.checkout_times.split(',') if obj.checkout_times else []\n checkout_times.append(str(datetime.utcnow()))\n data['checkout_times'] = ','.join(checkout_times)\n\n if 'attendee_notes' in data:\n if obj.attendee_notes and data['attendee_notes'] not in obj.attendee_notes.split(\",\"):\n data['attendee_notes'] = '{},{}'.format(obj.attendee_notes, data['attendee_notes'])\n\n decorators = (jwt_required,)\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder,\n 'methods': {\n 'before_get_object': before_get_object,\n 'before_update_object': before_update_object,\n 'before_delete_object': before_delete_object\n }}\n\n\nclass AttendeeRelationshipRequired(ResourceRelationship):\n \"\"\"\n Attendee Relationship (Required)\n \"\"\"\n decorators = (jwt_required,)\n methods = ['GET', 'PATCH']\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder}\n\n\nclass AttendeeRelationshipOptional(ResourceRelationship):\n \"\"\"\n Attendee Relationship(Optional)\n \"\"\"\n decorators = (api.has_permission('is_user_itself', fetch=\"user_id\", fetch_as=\"id\", model=TicketHolder),)\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder}\n\n\n@attendee_misc_routes.route('/attendees/send-receipt', methods=['POST'])\n@jwt_required\ndef send_receipt():\n \"\"\"\n Send receipts to attendees related to the provided order.\n :return:\n \"\"\"\n order_identifier = request.json.get('order-identifier')\n if order_identifier:\n try:\n order = db.session.query(Order).filter_by(identifier=order_identifier).one()\n except NoResultFound:\n raise ObjectNotFound({'parameter': '{identifier}'}, \"Order not found\")\n\n if (order.user_id != current_identity.id) and (not has_access('is_registrar', event_id=order.event_id)):\n abort(\n make_response(jsonify(error=\"You need to be the event organizer or order buyer to send receipts.\"), 403)\n )\n elif order.status != 'completed':\n abort(\n make_response(jsonify(error=\"Cannot send receipt for an incomplete order\"), 409)\n )\n else:\n send_email_to_attendees(order, current_identity.id)\n return jsonify(message=\"receipt sent to attendees\")\n else:\n abort(\n make_response(jsonify(error=\"Order identifier missing\"), 422)\n )\n", "path": "app/api/attendees.py"}]} | 3,688 | 177 |
gh_patches_debug_8208 | rasdani/github-patches | git_diff | learningequality__kolibri-1529 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
<Enhancement> A pop up on the screen when a learner masters an exercise would be great.
## Summary
<Enhancement> A pop up on the screen when a learner masters an exercise would be great.
Presently the learner is not aware if he/she has mastered the topic, since the point addition happens very subtly.
Hence a Pop up on screen that would read "You have Mastered the Exercise and earned 500 points, Keep Going" would be great. Anything even shorter would be great.
It's just that an on-screen pop-up would be great.
## System information
- Version: 0.4.0Beta1
 - Operating system: Linux LTS 14.04
- Browser: Chrome
## How to reproduce
1. Master an exercise on a tablet.
2. You do not know if you have mastered the topic clearly.
## Screenshots


</issue>
<code>
[start of kolibri/content/api.py]
1 from functools import reduce
2 from random import sample
3
4 from django.core.cache import cache
5 from django.db.models import Q
6 from django.db.models.aggregates import Count
7 from kolibri.content import models, serializers
8 from kolibri.content.content_db_router import get_active_content_database
9 from kolibri.logger.models import ContentSessionLog, ContentSummaryLog
10 from le_utils.constants import content_kinds
11 from rest_framework import filters, pagination, viewsets
12 from rest_framework.decorators import detail_route
13 from rest_framework.response import Response
14
15 from .utils.search import fuzz
16
17 def _join_with_logical_operator(lst, operator):
18 op = ") {operator} (".format(operator=operator)
19 return "(({items}))".format(items=op.join(lst))
20
21 class ChannelMetadataCacheViewSet(viewsets.ModelViewSet):
22 serializer_class = serializers.ChannelMetadataCacheSerializer
23
24 def get_queryset(self):
25 return models.ChannelMetadataCache.objects.all()
26
27
28 class ContentNodeFilter(filters.FilterSet):
29 search = filters.django_filters.MethodFilter(action='title_description_filter')
30 recommendations_for = filters.django_filters.MethodFilter()
31 next_steps = filters.django_filters.MethodFilter()
32 popular = filters.django_filters.MethodFilter()
33 resume = filters.django_filters.MethodFilter()
34 kind = filters.django_filters.MethodFilter()
35 ids = filters.django_filters.MethodFilter()
36
37 class Meta:
38 model = models.ContentNode
39 fields = ['parent', 'search', 'prerequisite_for', 'has_prerequisite', 'related', 'recommendations_for', 'ids', 'content_id']
40
41 def title_description_filter(self, queryset, value):
42 """
43 search for title or description that contains the keywords that are not necessary in adjacent
44 """
45 exact_match = queryset.filter(Q(parent__isnull=False), Q(title__icontains=value) | Q(description__icontains=value))
46 if exact_match:
47 return exact_match
48 # if no exact match, fuzzy search using the stemmed_metaphone field in ContentNode that covers the title and description
49 fuzzed_tokens = [fuzz(word) for word in value.split()]
50 if not fuzzed_tokens[0]:
51 return []
52 token_queries = [reduce(lambda x, y: x | y, [Q(stemmed_metaphone__contains=token) for token in tokens]) for tokens in fuzzed_tokens]
53 return queryset.filter(
54 Q(parent__isnull=False),
55 reduce(lambda x, y: x & y, token_queries))
56
57 def filter_recommendations_for(self, queryset, value):
58 """
59 Recommend items that are similar to this piece of content.
60 """
61 recc_node = queryset.get(pk=value)
62 descendants = recc_node.get_descendants(include_self=False).exclude(kind__in=['topic', ''])
63 siblings = recc_node.get_siblings(include_self=False).exclude(kind__in=['topic', ''])
64 data = descendants | siblings # concatenates different querysets
65 return data
66
67 def filter_next_steps(self, queryset, value):
68 """
69 Recommend uncompleted content, content that has user completed content as a prerequisite.
70
71 :param queryset: all content nodes for this channel
72 :param value: id of currently logged in user, or none if user is anonymous
73 :return: uncompleted content nodes, or empty queryset if user is anonymous
74 """
75
76 # if user is anonymous, don't return any nodes
77 if not value:
78 return queryset.none()
79
80 tables = [
81 '"{summarylog_table}" AS "complete_log"',
82 '"{summarylog_table}" AS "incomplete_log"',
83 '"{content_table}" AS "complete_node"',
84 '"{content_table}" AS "incomplete_node"',
85 ]
86 table_names = {
87 "summarylog_table": ContentSummaryLog._meta.db_table,
88 "content_table": models.ContentNode._meta.db_table,
89 }
90 # aliases for sql table names
91 sql_tables_and_aliases = [table.format(**table_names) for table in tables]
92 # where conditions joined by ANDs
93 where_statements = ["NOT (incomplete_log.progress < 1 AND incomplete_log.content_id = incomplete_node.content_id)",
94 "complete_log.user_id = '{user_id}'".format(user_id=value),
95 "incomplete_log.user_id = '{user_id}'".format(user_id=value),
96 "complete_log.progress = 1",
97 "complete_node.rght = incomplete_node.lft - 1",
98 "complete_log.content_id = complete_node.content_id"]
99 # custom SQL query to get uncompleted content based on mptt algorithm
100 next_steps_recommendations = "SELECT incomplete_node.* FROM {tables} WHERE {where}".format(
101 tables=", ".join(sql_tables_and_aliases),
102 where=_join_with_logical_operator(where_statements, "AND")
103 )
104 return models.ContentNode.objects.raw(next_steps_recommendations)
105
106 def filter_popular(self, queryset, value):
107 """
108 Recommend content that is popular with all users.
109
110 :param queryset: all content nodes for this channel
111 :param value: id of currently logged in user, or none if user is anonymous
112 :return: 10 most popular content nodes
113 """
114 if ContentSessionLog.objects.count() < 50:
115 # return 25 random content nodes if not enough session logs
116 pks = queryset.values_list('pk', flat=True).exclude(kind__in=['topic', ''])
117 count = min(pks.count(), 25)
118 return queryset.filter(pk__in=sample(list(pks), count))
119
120 cache_key = 'popular_for_{}'.format(get_active_content_database())
121 if cache.get(cache_key):
122 return cache.get(cache_key)
123
124 # get the most accessed content nodes
125 content_counts_sorted = ContentSessionLog.objects \
126 .filter(channel_id=get_active_content_database()) \
127 .values_list('content_id', flat=True) \
128 .annotate(Count('content_id')) \
129 .order_by('-content_id__count')
130
131 most_popular = queryset.filter(content_id__in=list(content_counts_sorted[:10]))
132
133 # cache the popular results queryset for 10 minutes, for efficiency
134 cache.set(cache_key, most_popular, 60 * 10)
135 return most_popular
136
137 def filter_resume(self, queryset, value):
138 """
139 Recommend content that the user has recently engaged with, but not finished.
140
141 :param queryset: all content nodes for this channel
142 :param value: id of currently logged in user, or none if user is anonymous
143 :return: 10 most recently viewed content nodes
144 """
145
146 # if user is anonymous, return no nodes
147 if not value:
148 return queryset.none()
149
150 # get the most recently viewed, but not finished, content nodes
151 content_ids = ContentSummaryLog.objects \
152 .filter(user=value, channel_id=get_active_content_database()) \
153 .exclude(progress=1) \
154 .order_by('end_timestamp') \
155 .values_list('content_id', flat=True) \
156 .distinct()
157
158 resume = queryset.filter(content_id__in=list(content_ids[:10]))
159
160 return resume
161
162 def filter_kind(self, queryset, value):
163 """
164 Show only content of a given kind.
165
166 :param queryset: all content nodes for this channel
167 :param value: 'content' for everything except topics, or one of the content kind constants
168 :return: content nodes of the given kind
169 """
170 if value == 'content':
171 return queryset.exclude(kind=content_kinds.TOPIC).order_by("lft")
172 return queryset.filter(kind=value).order_by("lft")
173
174 def filter_ids(self, queryset, value):
175 return queryset.filter(pk__in=value.split(','))
176
177
178 class OptionalPageNumberPagination(pagination.PageNumberPagination):
179 """
180 Pagination class that allows for page number-style pagination, when requested.
181 To activate, the `page_size` argument must be set. For example, to request the first 20 records:
182 `?page_size=20&page=1`
183 """
184 page_size = None
185 page_size_query_param = "page_size"
186
187
188 class ContentNodeViewset(viewsets.ModelViewSet):
189 serializer_class = serializers.ContentNodeSerializer
190 filter_backends = (filters.DjangoFilterBackend,)
191 filter_class = ContentNodeFilter
192 pagination_class = OptionalPageNumberPagination
193
194 def get_queryset(self):
195 return models.ContentNode.objects.all().select_related(
196 'parent',
197 'license',
198 ).prefetch_related(
199 'assessmentmetadata',
200 'files',
201 )
202
203 @detail_route(methods=['get'])
204 def descendants(self, request, **kwargs):
205 node = self.get_object()
206 kind = self.request.query_params.get('descendant_kind', None)
207 descendants = node.get_descendants()
208 if kind:
209 descendants = descendants.filter(kind=kind)
210
211 serializer = self.get_serializer(descendants, many=True)
212 return Response(serializer.data)
213
214 @detail_route(methods=['get'])
215 def ancestors(self, request, **kwargs):
216 return Response(self.get_object().get_ancestors().values('pk', 'title'))
217
218 @detail_route(methods=['get'])
219 def next_content(self, request, **kwargs):
220 # retrieve the "next" content node, according to depth-first tree traversal
221 this_item = self.get_object()
222 next_item = models.ContentNode.objects.filter(tree_id=this_item.tree_id, lft__gt=this_item.rght).order_by("lft").first()
223 if not next_item:
224 next_item = this_item.get_root()
225 return Response({'kind': next_item.kind, 'id': next_item.id})
226
227
228 class FileViewset(viewsets.ModelViewSet):
229 serializer_class = serializers.FileSerializer
230 pagination_class = OptionalPageNumberPagination
231
232 def get_queryset(self):
233 return models.File.objects.all()
234
[end of kolibri/content/api.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kolibri/content/api.py b/kolibri/content/api.py
--- a/kolibri/content/api.py
+++ b/kolibri/content/api.py
@@ -222,7 +222,7 @@
next_item = models.ContentNode.objects.filter(tree_id=this_item.tree_id, lft__gt=this_item.rght).order_by("lft").first()
if not next_item:
next_item = this_item.get_root()
- return Response({'kind': next_item.kind, 'id': next_item.id})
+ return Response({'kind': next_item.kind, 'id': next_item.id, 'title': next_item.title})
class FileViewset(viewsets.ModelViewSet):
| {"golden_diff": "diff --git a/kolibri/content/api.py b/kolibri/content/api.py\n--- a/kolibri/content/api.py\n+++ b/kolibri/content/api.py\n@@ -222,7 +222,7 @@\n next_item = models.ContentNode.objects.filter(tree_id=this_item.tree_id, lft__gt=this_item.rght).order_by(\"lft\").first()\n if not next_item:\n next_item = this_item.get_root()\n- return Response({'kind': next_item.kind, 'id': next_item.id})\n+ return Response({'kind': next_item.kind, 'id': next_item.id, 'title': next_item.title})\n \n \n class FileViewset(viewsets.ModelViewSet):\n", "issue": "<Enhancement> A pop up on the screen when a learner masters an exercise would be great.\n## Summary\r\n\r\n<Enhancement> A pop up on the screen when a learner masters an exercise would be great.\r\n\r\nPresently the learner is not aware if he/she has mastered the topic, since the point addition happens very subtly.\r\nHence a Pop up on screen that would read \"You have Mastered the Exercise and earned 500 points, Keep Going\" would be great. Anything even shorter would be great. \r\n\r\nIts just an on screen pop up would be great.\r\n\r\n## System information\r\n\r\n - Version: 0.4.0Beta1\r\n - Operating system: Linus LTS 14.04\r\n - Browser: Chrome\r\n\r\n## How to reproduce\r\n\r\n1. Master an exercise on a tablet.\r\n2. You not know if you have mastered the topic clearly .\r\n\r\n## Screenshots\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from functools import reduce\nfrom random import sample\n\nfrom django.core.cache import cache\nfrom django.db.models import Q\nfrom django.db.models.aggregates import Count\nfrom kolibri.content import models, serializers\nfrom kolibri.content.content_db_router import get_active_content_database\nfrom kolibri.logger.models import ContentSessionLog, ContentSummaryLog\nfrom le_utils.constants import content_kinds\nfrom rest_framework import filters, pagination, viewsets\nfrom rest_framework.decorators import detail_route\nfrom rest_framework.response import Response\n\nfrom .utils.search import fuzz\n\ndef _join_with_logical_operator(lst, operator):\n op = \") {operator} (\".format(operator=operator)\n return \"(({items}))\".format(items=op.join(lst))\n\nclass ChannelMetadataCacheViewSet(viewsets.ModelViewSet):\n serializer_class = serializers.ChannelMetadataCacheSerializer\n\n def get_queryset(self):\n return models.ChannelMetadataCache.objects.all()\n\n\nclass ContentNodeFilter(filters.FilterSet):\n search = filters.django_filters.MethodFilter(action='title_description_filter')\n recommendations_for = filters.django_filters.MethodFilter()\n next_steps = filters.django_filters.MethodFilter()\n popular = filters.django_filters.MethodFilter()\n resume = filters.django_filters.MethodFilter()\n kind = filters.django_filters.MethodFilter()\n ids = filters.django_filters.MethodFilter()\n\n class Meta:\n model = models.ContentNode\n fields = ['parent', 'search', 'prerequisite_for', 'has_prerequisite', 'related', 'recommendations_for', 'ids', 'content_id']\n\n def title_description_filter(self, queryset, value):\n \"\"\"\n search for title or description that contains the keywords that are not necessary in adjacent\n \"\"\"\n exact_match = queryset.filter(Q(parent__isnull=False), Q(title__icontains=value) | Q(description__icontains=value))\n if exact_match:\n return exact_match\n # if no exact match, fuzzy search using the stemmed_metaphone field in ContentNode that covers the title and description\n fuzzed_tokens = [fuzz(word) for word in value.split()]\n if not fuzzed_tokens[0]:\n return []\n 
token_queries = [reduce(lambda x, y: x | y, [Q(stemmed_metaphone__contains=token) for token in tokens]) for tokens in fuzzed_tokens]\n return queryset.filter(\n Q(parent__isnull=False),\n reduce(lambda x, y: x & y, token_queries))\n\n def filter_recommendations_for(self, queryset, value):\n \"\"\"\n Recommend items that are similar to this piece of content.\n \"\"\"\n recc_node = queryset.get(pk=value)\n descendants = recc_node.get_descendants(include_self=False).exclude(kind__in=['topic', ''])\n siblings = recc_node.get_siblings(include_self=False).exclude(kind__in=['topic', ''])\n data = descendants | siblings # concatenates different querysets\n return data\n\n def filter_next_steps(self, queryset, value):\n \"\"\"\n Recommend uncompleted content, content that has user completed content as a prerequisite.\n\n :param queryset: all content nodes for this channel\n :param value: id of currently logged in user, or none if user is anonymous\n :return: uncompleted content nodes, or empty queryset if user is anonymous\n \"\"\"\n\n # if user is anonymous, don't return any nodes\n if not value:\n return queryset.none()\n\n tables = [\n '\"{summarylog_table}\" AS \"complete_log\"',\n '\"{summarylog_table}\" AS \"incomplete_log\"',\n '\"{content_table}\" AS \"complete_node\"',\n '\"{content_table}\" AS \"incomplete_node\"',\n ]\n table_names = {\n \"summarylog_table\": ContentSummaryLog._meta.db_table,\n \"content_table\": models.ContentNode._meta.db_table,\n }\n # aliases for sql table names\n sql_tables_and_aliases = [table.format(**table_names) for table in tables]\n # where conditions joined by ANDs\n where_statements = [\"NOT (incomplete_log.progress < 1 AND incomplete_log.content_id = incomplete_node.content_id)\",\n \"complete_log.user_id = '{user_id}'\".format(user_id=value),\n \"incomplete_log.user_id = '{user_id}'\".format(user_id=value),\n \"complete_log.progress = 1\",\n \"complete_node.rght = incomplete_node.lft - 1\",\n \"complete_log.content_id = complete_node.content_id\"]\n # custom SQL query to get uncompleted content based on mptt algorithm\n next_steps_recommendations = \"SELECT incomplete_node.* FROM {tables} WHERE {where}\".format(\n tables=\", \".join(sql_tables_and_aliases),\n where=_join_with_logical_operator(where_statements, \"AND\")\n )\n return models.ContentNode.objects.raw(next_steps_recommendations)\n\n def filter_popular(self, queryset, value):\n \"\"\"\n Recommend content that is popular with all users.\n\n :param queryset: all content nodes for this channel\n :param value: id of currently logged in user, or none if user is anonymous\n :return: 10 most popular content nodes\n \"\"\"\n if ContentSessionLog.objects.count() < 50:\n # return 25 random content nodes if not enough session logs\n pks = queryset.values_list('pk', flat=True).exclude(kind__in=['topic', ''])\n count = min(pks.count(), 25)\n return queryset.filter(pk__in=sample(list(pks), count))\n\n cache_key = 'popular_for_{}'.format(get_active_content_database())\n if cache.get(cache_key):\n return cache.get(cache_key)\n\n # get the most accessed content nodes\n content_counts_sorted = ContentSessionLog.objects \\\n .filter(channel_id=get_active_content_database()) \\\n .values_list('content_id', flat=True) \\\n .annotate(Count('content_id')) \\\n .order_by('-content_id__count')\n\n most_popular = queryset.filter(content_id__in=list(content_counts_sorted[:10]))\n\n # cache the popular results queryset for 10 minutes, for efficiency\n cache.set(cache_key, most_popular, 60 * 10)\n return most_popular\n\n def 
filter_resume(self, queryset, value):\n \"\"\"\n Recommend content that the user has recently engaged with, but not finished.\n\n :param queryset: all content nodes for this channel\n :param value: id of currently logged in user, or none if user is anonymous\n :return: 10 most recently viewed content nodes\n \"\"\"\n\n # if user is anonymous, return no nodes\n if not value:\n return queryset.none()\n\n # get the most recently viewed, but not finished, content nodes\n content_ids = ContentSummaryLog.objects \\\n .filter(user=value, channel_id=get_active_content_database()) \\\n .exclude(progress=1) \\\n .order_by('end_timestamp') \\\n .values_list('content_id', flat=True) \\\n .distinct()\n\n resume = queryset.filter(content_id__in=list(content_ids[:10]))\n\n return resume\n\n def filter_kind(self, queryset, value):\n \"\"\"\n Show only content of a given kind.\n\n :param queryset: all content nodes for this channel\n :param value: 'content' for everything except topics, or one of the content kind constants\n :return: content nodes of the given kind\n \"\"\"\n if value == 'content':\n return queryset.exclude(kind=content_kinds.TOPIC).order_by(\"lft\")\n return queryset.filter(kind=value).order_by(\"lft\")\n\n def filter_ids(self, queryset, value):\n return queryset.filter(pk__in=value.split(','))\n\n\nclass OptionalPageNumberPagination(pagination.PageNumberPagination):\n \"\"\"\n Pagination class that allows for page number-style pagination, when requested.\n To activate, the `page_size` argument must be set. For example, to request the first 20 records:\n `?page_size=20&page=1`\n \"\"\"\n page_size = None\n page_size_query_param = \"page_size\"\n\n\nclass ContentNodeViewset(viewsets.ModelViewSet):\n serializer_class = serializers.ContentNodeSerializer\n filter_backends = (filters.DjangoFilterBackend,)\n filter_class = ContentNodeFilter\n pagination_class = OptionalPageNumberPagination\n\n def get_queryset(self):\n return models.ContentNode.objects.all().select_related(\n 'parent',\n 'license',\n ).prefetch_related(\n 'assessmentmetadata',\n 'files',\n )\n\n @detail_route(methods=['get'])\n def descendants(self, request, **kwargs):\n node = self.get_object()\n kind = self.request.query_params.get('descendant_kind', None)\n descendants = node.get_descendants()\n if kind:\n descendants = descendants.filter(kind=kind)\n\n serializer = self.get_serializer(descendants, many=True)\n return Response(serializer.data)\n\n @detail_route(methods=['get'])\n def ancestors(self, request, **kwargs):\n return Response(self.get_object().get_ancestors().values('pk', 'title'))\n\n @detail_route(methods=['get'])\n def next_content(self, request, **kwargs):\n # retrieve the \"next\" content node, according to depth-first tree traversal\n this_item = self.get_object()\n next_item = models.ContentNode.objects.filter(tree_id=this_item.tree_id, lft__gt=this_item.rght).order_by(\"lft\").first()\n if not next_item:\n next_item = this_item.get_root()\n return Response({'kind': next_item.kind, 'id': next_item.id})\n\n\nclass FileViewset(viewsets.ModelViewSet):\n serializer_class = serializers.FileSerializer\n pagination_class = OptionalPageNumberPagination\n\n def get_queryset(self):\n return models.File.objects.all()\n", "path": "kolibri/content/api.py"}]} | 3,562 | 151 |
gh_patches_debug_18220 | rasdani/github-patches | git_diff | lra__mackup-1359 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Critical error! Using .mackup as directory option deletes application configs
I used the following config as `~/.mackup.cfg`
```
[storage]
engine = file_system
path = /Users/myuser
directory = .mackup
```
And running `mackup backup` deleted all my application configs, because [this step](https://github.com/lra/mackup/blob/master/mackup/application.py#L114-L116) doesn't check whether `home_filepath` is equal to `mackup_filepath`; it therefore copies files to `.mackup` and then deletes `.mackup`...
Thanks a lot :(
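
For illustration, a guard of roughly this shape around the copy step would avoid the destructive case. This is only a sketch — the `copy_to_mackup` function and its return values are assumed, and only the `home_filepath`/`mackup_filepath` names come from the linked code — not mackup's actual implementation:

```python
import os


def copy_to_mackup(home_filepath, mackup_filepath):
    """Sketch of a guard: skip the backup step when source and target are
    the same file."""
    if os.path.realpath(home_filepath) == os.path.realpath(mackup_filepath):
        # Source and destination resolve to the same file; copying it onto
        # itself and then deleting the "original" would destroy the only copy.
        return False
    # ... the normal backup (copy/delete) logic would go here ...
    return True
```

Alternatively, rejecting `.mackup` as the `directory` option at config-parsing time would close the same hole one level earlier.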
</issue>
<code>
[start of mackup/config.py]
1 """Package used to manage the .mackup.cfg config file."""
2
3 import os
4 import os.path
5
6 from .constants import (MACKUP_BACKUP_PATH,
7 MACKUP_CONFIG_FILE,
8 ENGINE_DROPBOX,
9 ENGINE_GDRIVE,
10 ENGINE_COPY,
11 ENGINE_ICLOUD,
12 ENGINE_BOX,
13 ENGINE_FS)
14
15 from .utils import (error,
16 get_dropbox_folder_location,
17 get_copy_folder_location,
18 get_google_drive_folder_location,
19 get_icloud_folder_location,
20 get_box_folder_location)
21 try:
22 import configparser
23 except ImportError:
24 import ConfigParser as configparser
25
26
27 class Config(object):
28
29 """The Mackup Config class."""
30
31 def __init__(self, filename=None):
32 """
33 Create a Config instance.
34
35 Args:
36 filename (str): Optional filename of the config file. If empty,
37 defaults to MACKUP_CONFIG_FILE
38 """
39 assert isinstance(filename, str) or filename is None
40
41 # Initialize the parser
42 self._parser = self._setup_parser(filename)
43
44 # Do we have an old config file ?
45 self._warn_on_old_config()
46
47 # Get the storage engine
48 self._engine = self._parse_engine()
49
50 # Get the path where the Mackup folder is
51 self._path = self._parse_path()
52
53 # Get the directory replacing 'Mackup', if any
54 self._directory = self._parse_directory()
55
56 # Get the list of apps to ignore
57 self._apps_to_ignore = self._parse_apps_to_ignore()
58
59 # Get the list of apps to allow
60 self._apps_to_sync = self._parse_apps_to_sync()
61
62 @property
63 def engine(self):
64 """
65 The engine used by the storage.
66
67 ENGINE_DROPBOX, ENGINE_GDRIVE, ENGINE_COPY, ENGINE_ICLOUD, ENGINE_BOX
68 or ENGINE_FS.
69
70 Returns:
71 str
72 """
73 return str(self._engine)
74
75 @property
76 def path(self):
77 """
78 Path to the Mackup configuration files.
79
80 The path to the directory where Mackup is gonna create and store his
81 directory.
82
83 Returns:
84 str
85 """
86 return str(self._path)
87
88 @property
89 def directory(self):
90 """
91 The name of the Mackup directory, named Mackup by default.
92
93 Returns:
94 str
95 """
96 return str(self._directory)
97
98 @property
99 def fullpath(self):
100 """
101 Full path to the Mackup configuration files.
102
103 The full path to the directory when Mackup is storing the configuration
104 files.
105
106 Returns:
107 str
108 """
109 return str(os.path.join(self.path, self.directory))
110
111 @property
112 def apps_to_ignore(self):
113 """
114 Get the list of applications ignored in the config file.
115
116 Returns:
117 set. Set of application names to ignore, lowercase
118 """
119 return set(self._apps_to_ignore)
120
121 @property
122 def apps_to_sync(self):
123 """
124 Get the list of applications allowed in the config file.
125
126 Returns:
127 set. Set of application names to allow, lowercase
128 """
129 return set(self._apps_to_sync)
130
131 def _setup_parser(self, filename=None):
132 """
133 Configure the ConfigParser instance the way we want it.
134
135 Args:
136 filename (str) or None
137
138 Returns:
139 SafeConfigParser
140 """
141 assert isinstance(filename, str) or filename is None
142
143 # If we are not overriding the config filename
144 if not filename:
145 filename = MACKUP_CONFIG_FILE
146
147 parser = configparser.SafeConfigParser(allow_no_value=True)
148 parser.read(os.path.join(os.path.join(os.environ['HOME'], filename)))
149
150 return parser
151
152 def _warn_on_old_config(self):
153 """Warn the user if an old config format is detected."""
154 # Is an old setion is in the config file ?
155 old_sections = ['Allowed Applications', 'Ignored Applications']
156 for old_section in old_sections:
157 if self._parser.has_section(old_section):
158 error("Old config file detected. Aborting.\n"
159 "\n"
160 "An old section (e.g. [Allowed Applications]"
161 " or [Ignored Applications] has been detected"
162 " in your {} file.\n"
163 "I'd rather do nothing than do something you"
164 " do not want me to do.\n"
165 "\n"
166 "Please read the up to date documentation on"
167 " <https://github.com/lra/mackup> and migrate"
168 " your configuration file."
169 .format(MACKUP_CONFIG_FILE))
170
171 def _parse_engine(self):
172 """
173 Parse the storage engine in the config.
174
175 Returns:
176 str
177 """
178 if self._parser.has_option('storage', 'engine'):
179 engine = str(self._parser.get('storage', 'engine'))
180 else:
181 engine = ENGINE_DROPBOX
182
183 assert isinstance(engine, str)
184
185 if engine not in [ENGINE_DROPBOX,
186 ENGINE_GDRIVE,
187 ENGINE_COPY,
188 ENGINE_ICLOUD,
189 ENGINE_BOX,
190 ENGINE_FS]:
191 raise ConfigError('Unknown storage engine: {}'.format(engine))
192
193 return str(engine)
194
195 def _parse_path(self):
196 """
197 Parse the storage path in the config.
198
199 Returns:
200 str
201 """
202 if self.engine == ENGINE_DROPBOX:
203 path = get_dropbox_folder_location()
204 elif self.engine == ENGINE_GDRIVE:
205 path = get_google_drive_folder_location()
206 elif self.engine == ENGINE_COPY:
207 path = get_copy_folder_location()
208 elif self.engine == ENGINE_ICLOUD:
209 path = get_icloud_folder_location()
210 elif self.engine == ENGINE_BOX:
211 path = get_box_folder_location()
212 elif self.engine == ENGINE_FS:
213 if self._parser.has_option('storage', 'path'):
214 cfg_path = self._parser.get('storage', 'path')
215 path = os.path.join(os.environ['HOME'], cfg_path)
216 else:
217 raise ConfigError("The required 'path' can't be found while"
218 " the 'file_system' engine is used.")
219
220 return str(path)
221
222 def _parse_directory(self):
223 """
224 Parse the storage directory in the config.
225
226 Returns:
227 str
228 """
229 if self._parser.has_option('storage', 'directory'):
230 directory = self._parser.get('storage', 'directory')
231 else:
232 directory = MACKUP_BACKUP_PATH
233
234 return str(directory)
235
236 def _parse_apps_to_ignore(self):
237 """
238 Parse the applications to ignore in the config.
239
240 Returns:
241 set
242 """
243 # We ignore nothing by default
244 apps_to_ignore = set()
245
246 # Is the "[applications_to_ignore]" in the cfg file ?
247 section_title = 'applications_to_ignore'
248 if self._parser.has_section(section_title):
249 apps_to_ignore = set(self._parser.options(section_title))
250
251 return apps_to_ignore
252
253 def _parse_apps_to_sync(self):
254 """
255 Parse the applications to backup in the config.
256
257 Returns:
258 set
259 """
260 # We allow nothing by default
261 apps_to_sync = set()
262
263 # Is the "[applications_to_sync]" section in the cfg file ?
264 section_title = 'applications_to_sync'
265 if self._parser.has_section(section_title):
266 apps_to_sync = set(self._parser.options(section_title))
267
268 return apps_to_sync
269
270
271 class ConfigError(Exception):
272
273 """Exception used for handle errors in the configuration."""
274
275 pass
276
[end of mackup/config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mackup/config.py b/mackup/config.py
--- a/mackup/config.py
+++ b/mackup/config.py
@@ -3,7 +3,8 @@
import os
import os.path
-from .constants import (MACKUP_BACKUP_PATH,
+from .constants import (CUSTOM_APPS_DIR,
+ MACKUP_BACKUP_PATH,
MACKUP_CONFIG_FILE,
ENGINE_DROPBOX,
ENGINE_GDRIVE,
@@ -228,6 +229,10 @@
"""
if self._parser.has_option('storage', 'directory'):
directory = self._parser.get('storage', 'directory')
+ # Don't allow CUSTOM_APPS_DIR as a storage directory
+ if directory == CUSTOM_APPS_DIR:
+ raise ConfigError("{} cannot be used as a storage directory."
+ .format(CUSTOM_APPS_DIR))
else:
directory = MACKUP_BACKUP_PATH
| {"golden_diff": "diff --git a/mackup/config.py b/mackup/config.py\n--- a/mackup/config.py\n+++ b/mackup/config.py\n@@ -3,7 +3,8 @@\n import os\n import os.path\n \n-from .constants import (MACKUP_BACKUP_PATH,\n+from .constants import (CUSTOM_APPS_DIR,\n+ MACKUP_BACKUP_PATH,\n MACKUP_CONFIG_FILE,\n ENGINE_DROPBOX,\n ENGINE_GDRIVE,\n@@ -228,6 +229,10 @@\n \"\"\"\n if self._parser.has_option('storage', 'directory'):\n directory = self._parser.get('storage', 'directory')\n+ # Don't allow CUSTOM_APPS_DIR as a storage directory\n+ if directory == CUSTOM_APPS_DIR:\n+ raise ConfigError(\"{} cannot be used as a storage directory.\"\n+ .format(CUSTOM_APPS_DIR))\n else:\n directory = MACKUP_BACKUP_PATH\n", "issue": "Critical error! Using .mackup as directory option deletes application configs\nI used the following config as `~/.mackup.cfg` \r\n```\r\n[storage]\r\nengine = file_system\r\npath = /Users/myuser\r\ndirectory = .mackup\r\n```\r\n\r\nAnd running `mackup backup` deleted all my application configs because [this step](https://github.com/lra/mackup/blob/master/mackup/application.py#L114-L116) doesn't check if `home_filepath` is equal to `mackup_filepath` therefore copying files to `.mackup` then deleting `.mackup`...\r\n\r\nThanks a lot :(\n", "before_files": [{"content": "\"\"\"Package used to manage the .mackup.cfg config file.\"\"\"\n\nimport os\nimport os.path\n\nfrom .constants import (MACKUP_BACKUP_PATH,\n MACKUP_CONFIG_FILE,\n ENGINE_DROPBOX,\n ENGINE_GDRIVE,\n ENGINE_COPY,\n ENGINE_ICLOUD,\n ENGINE_BOX,\n ENGINE_FS)\n\nfrom .utils import (error,\n get_dropbox_folder_location,\n get_copy_folder_location,\n get_google_drive_folder_location,\n get_icloud_folder_location,\n get_box_folder_location)\ntry:\n import configparser\nexcept ImportError:\n import ConfigParser as configparser\n\n\nclass Config(object):\n\n \"\"\"The Mackup Config class.\"\"\"\n\n def __init__(self, filename=None):\n \"\"\"\n Create a Config instance.\n\n Args:\n filename (str): Optional filename of the config file. 
If empty,\n defaults to MACKUP_CONFIG_FILE\n \"\"\"\n assert isinstance(filename, str) or filename is None\n\n # Initialize the parser\n self._parser = self._setup_parser(filename)\n\n # Do we have an old config file ?\n self._warn_on_old_config()\n\n # Get the storage engine\n self._engine = self._parse_engine()\n\n # Get the path where the Mackup folder is\n self._path = self._parse_path()\n\n # Get the directory replacing 'Mackup', if any\n self._directory = self._parse_directory()\n\n # Get the list of apps to ignore\n self._apps_to_ignore = self._parse_apps_to_ignore()\n\n # Get the list of apps to allow\n self._apps_to_sync = self._parse_apps_to_sync()\n\n @property\n def engine(self):\n \"\"\"\n The engine used by the storage.\n\n ENGINE_DROPBOX, ENGINE_GDRIVE, ENGINE_COPY, ENGINE_ICLOUD, ENGINE_BOX\n or ENGINE_FS.\n\n Returns:\n str\n \"\"\"\n return str(self._engine)\n\n @property\n def path(self):\n \"\"\"\n Path to the Mackup configuration files.\n\n The path to the directory where Mackup is gonna create and store his\n directory.\n\n Returns:\n str\n \"\"\"\n return str(self._path)\n\n @property\n def directory(self):\n \"\"\"\n The name of the Mackup directory, named Mackup by default.\n\n Returns:\n str\n \"\"\"\n return str(self._directory)\n\n @property\n def fullpath(self):\n \"\"\"\n Full path to the Mackup configuration files.\n\n The full path to the directory when Mackup is storing the configuration\n files.\n\n Returns:\n str\n \"\"\"\n return str(os.path.join(self.path, self.directory))\n\n @property\n def apps_to_ignore(self):\n \"\"\"\n Get the list of applications ignored in the config file.\n\n Returns:\n set. Set of application names to ignore, lowercase\n \"\"\"\n return set(self._apps_to_ignore)\n\n @property\n def apps_to_sync(self):\n \"\"\"\n Get the list of applications allowed in the config file.\n\n Returns:\n set. Set of application names to allow, lowercase\n \"\"\"\n return set(self._apps_to_sync)\n\n def _setup_parser(self, filename=None):\n \"\"\"\n Configure the ConfigParser instance the way we want it.\n\n Args:\n filename (str) or None\n\n Returns:\n SafeConfigParser\n \"\"\"\n assert isinstance(filename, str) or filename is None\n\n # If we are not overriding the config filename\n if not filename:\n filename = MACKUP_CONFIG_FILE\n\n parser = configparser.SafeConfigParser(allow_no_value=True)\n parser.read(os.path.join(os.path.join(os.environ['HOME'], filename)))\n\n return parser\n\n def _warn_on_old_config(self):\n \"\"\"Warn the user if an old config format is detected.\"\"\"\n # Is an old setion is in the config file ?\n old_sections = ['Allowed Applications', 'Ignored Applications']\n for old_section in old_sections:\n if self._parser.has_section(old_section):\n error(\"Old config file detected. Aborting.\\n\"\n \"\\n\"\n \"An old section (e.g. 
[Allowed Applications]\"\n \" or [Ignored Applications] has been detected\"\n \" in your {} file.\\n\"\n \"I'd rather do nothing than do something you\"\n \" do not want me to do.\\n\"\n \"\\n\"\n \"Please read the up to date documentation on\"\n \" <https://github.com/lra/mackup> and migrate\"\n \" your configuration file.\"\n .format(MACKUP_CONFIG_FILE))\n\n def _parse_engine(self):\n \"\"\"\n Parse the storage engine in the config.\n\n Returns:\n str\n \"\"\"\n if self._parser.has_option('storage', 'engine'):\n engine = str(self._parser.get('storage', 'engine'))\n else:\n engine = ENGINE_DROPBOX\n\n assert isinstance(engine, str)\n\n if engine not in [ENGINE_DROPBOX,\n ENGINE_GDRIVE,\n ENGINE_COPY,\n ENGINE_ICLOUD,\n ENGINE_BOX,\n ENGINE_FS]:\n raise ConfigError('Unknown storage engine: {}'.format(engine))\n\n return str(engine)\n\n def _parse_path(self):\n \"\"\"\n Parse the storage path in the config.\n\n Returns:\n str\n \"\"\"\n if self.engine == ENGINE_DROPBOX:\n path = get_dropbox_folder_location()\n elif self.engine == ENGINE_GDRIVE:\n path = get_google_drive_folder_location()\n elif self.engine == ENGINE_COPY:\n path = get_copy_folder_location()\n elif self.engine == ENGINE_ICLOUD:\n path = get_icloud_folder_location()\n elif self.engine == ENGINE_BOX:\n path = get_box_folder_location()\n elif self.engine == ENGINE_FS:\n if self._parser.has_option('storage', 'path'):\n cfg_path = self._parser.get('storage', 'path')\n path = os.path.join(os.environ['HOME'], cfg_path)\n else:\n raise ConfigError(\"The required 'path' can't be found while\"\n \" the 'file_system' engine is used.\")\n\n return str(path)\n\n def _parse_directory(self):\n \"\"\"\n Parse the storage directory in the config.\n\n Returns:\n str\n \"\"\"\n if self._parser.has_option('storage', 'directory'):\n directory = self._parser.get('storage', 'directory')\n else:\n directory = MACKUP_BACKUP_PATH\n\n return str(directory)\n\n def _parse_apps_to_ignore(self):\n \"\"\"\n Parse the applications to ignore in the config.\n\n Returns:\n set\n \"\"\"\n # We ignore nothing by default\n apps_to_ignore = set()\n\n # Is the \"[applications_to_ignore]\" in the cfg file ?\n section_title = 'applications_to_ignore'\n if self._parser.has_section(section_title):\n apps_to_ignore = set(self._parser.options(section_title))\n\n return apps_to_ignore\n\n def _parse_apps_to_sync(self):\n \"\"\"\n Parse the applications to backup in the config.\n\n Returns:\n set\n \"\"\"\n # We allow nothing by default\n apps_to_sync = set()\n\n # Is the \"[applications_to_sync]\" section in the cfg file ?\n section_title = 'applications_to_sync'\n if self._parser.has_section(section_title):\n apps_to_sync = set(self._parser.options(section_title))\n\n return apps_to_sync\n\n\nclass ConfigError(Exception):\n\n \"\"\"Exception used for handle errors in the configuration.\"\"\"\n\n pass\n", "path": "mackup/config.py"}]} | 3,013 | 200 |
gh_patches_debug_19839 | rasdani/github-patches | git_diff | translate__pootle-3723 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Check 'delete' checkbox for a new entry renders error
_Version: 2.5.1_
_Originally posted by Mike Massonnet:_
_Imported from attachment 976:_
``` pytb
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/django/core/handlers/base.py", line 109, in get_response
response = callback(request, *callback_args, **callback_kwargs)
File "/usr/lib/python2.7/dist-packages/pootle/core/decorators.py", line 87, in wrapped
return func(request, path_obj, *args, **kwargs)
File "/usr/lib/python2.7/dist-packages/pootle/core/decorators.py", line 180, in _wrapped
return func(request, *args, **kwargs)
File "/usr/lib/python2.7/dist-packages/pootle/apps/pootle_project/views.py", line 427, in project_admin
exclude=('description',))
File "/usr/lib/python2.7/dist-packages/pootle/apps/pootle_app/views/admin/util.py", line 191, in edit
queryset=queryset, **kwargs)
File "/usr/lib/python2.7/dist-packages/pootle/apps/pootle_app/views/admin/util.py", line 172, in process_modelformset
if formset.is_valid():
File "/usr/lib/python2.7/dist-packages/django/forms/formsets.py", line 272, in is_valid
err = self.errors
File "/usr/lib/python2.7/dist-packages/django/forms/formsets.py", line 250, in _get_errors
self.full_clean()
File "/usr/lib/python2.7/dist-packages/django/forms/formsets.py", line 293, in full_clean
self._errors.append(form.errors)
File "/usr/lib/python2.7/dist-packages/django/forms/forms.py", line 115, in _get_errors
self.full_clean()
File "/usr/lib/python2.7/dist-packages/django/forms/forms.py", line 272, in full_clean
self._post_clean()
File "/usr/lib/python2.7/dist-packages/django/forms/models.py", line 309, in _post_clean
self.instance = construct_instance(self, self.instance, opts.fields, opts.exclude)
File "/usr/lib/python2.7/dist-packages/django/forms/models.py", line 51, in construct_instance
f.save_form_data(instance, cleaned_data[f.name])
File "/usr/lib/python2.7/dist-packages/django/db/models/fields/__init__.py", line 454, in save_form_data
setattr(instance, self.name, data)
File "/usr/lib/python2.7/dist-packages/django/db/models/fields/related.py", line 362, in __set__
(instance._meta.object_name, self.field.name))
ValueError: Cannot assign None: "TranslationProject.language" does not allow null values.
```
_Created [attachment 976](http://bugs.locamotion.org/attachment.cgi?id=976)_
pootle-check-all-traceback.txt
Hello,
In the administration page of a project, checking the Delete checkbox for a new entry renders an error. This checkbox is also automatically selected when checking the Delete checkbox at the top of the form, to check all checkboxes.
The following error is rendered:
"Cannot assign None: "TranslationProject.language" does not allow null values."
See attached screenshots and traceback.
Regards,
Mike
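
For what it's worth, the traceback suggests the crash comes from the formset's extra blank form: once its DELETE box is ticked, Django still tries to build a model instance from the otherwise empty form and trips over the non-nullable `language` field. A minimal sketch of a workaround in that spirit is below; the `formset` argument is assumed to come from a `modelformset_factory(..., can_delete=True)` call, so this is an illustration rather than Pootle's actual code:

```python
from django import forms


def hide_delete_on_blank_form(formset):
    """Hide the DELETE checkbox on the trailing blank 'add a new entry'
    form so a 'check all' action cannot tick it and force validation of
    an empty form."""
    blank_form = formset.forms[-1]        # the extra form appended by the factory
    if 'DELETE' in blank_form.fields:     # only present when can_delete=True
        blank_form.fields['DELETE'].widget = forms.HiddenInput()
```

Applying something like this where the admin formset table is rendered (e.g. in `pootle_app/views/admin/util.py`) would keep the checkbox out of the "check all" selection.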
</issue>
<code>
[start of pootle/apps/pootle_app/views/admin/util.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 from django.forms.models import modelformset_factory
11 from django.forms.utils import ErrorList
12 from django.shortcuts import render
13 from django.utils.safestring import mark_safe
14 from django.utils.translation import ugettext as _
15
16 from pootle.core.paginator import paginate
17
18
19 def form_set_as_table(formset, link=None, linkfield='code'):
20 """Create an HTML table from the formset. The first form in the
21 formset is used to obtain a list of the fields that need to be
22 displayed.
23
24 Errors, if there are any, appear in the row above the form which
25 triggered any errors.
26
27 If the forms are based on database models, the order of the
28 columns is determined by the order of the fields in the model
29 specification.
30 """
31
32 def add_header(result, fields, form):
33 result.append('<tr>\n')
34 for field in fields:
35 widget = form.fields[field].widget
36 widget_name = widget.__class__.__name__
37
38 if widget.is_hidden or \
39 widget_name in ('CheckboxInput', 'SelectMultiple'):
40 result.append('<th class="sorttable_nosort">')
41 else:
42 result.append('<th>')
43
44 if widget_name in ('CheckboxInput',):
45 result.append(form[field].as_widget())
46 result.append(form[field].label_tag())
47 elif form.fields[field].label is not None and not widget.is_hidden:
48 result.append(unicode(form.fields[field].label))
49
50 result.append('</th>\n')
51 result.append('</tr>\n')
52
53 def add_footer(result, fields, form):
54 result.append('<tr>\n')
55 for field in fields:
56 field_obj = form.fields[field]
57 result.append('<td>')
58
59 if field_obj.label is not None and not field_obj.widget.is_hidden:
60 result.append(unicode(field_obj.label))
61
62 result.append('</td>\n')
63 result.append('</tr>\n')
64
65 def add_errors(result, fields, form):
66 # If the form has errors, then we'll add a table row with the
67 # errors.
68 if len(form.errors) > 0:
69 result.append('<tr>\n')
70 for field in fields:
71 result.append('<td>')
72 result.append(form.errors.get(field, ErrorList()).as_ul())
73 result.append('</td>\n')
74
75 result.append('</tr>\n')
76
77 def add_widgets(result, fields, form, link):
78 result.append('<tr class="item">\n')
79 for i, field in enumerate(fields):
80 result.append('<td class="%s">' % field)
81 # Include a hidden element containing the form's id to the
82 # first column.
83 if i == 0:
84 result.append(form['id'].as_hidden())
85
86 # `link` indicates whether we put the first field as a link or as
87 # widget
88 if field == linkfield and linkfield in form.initial and link:
89 if callable(link):
90 result.append(link(form.instance))
91 result.append(form[field].as_hidden())
92 else:
93 result.append(form[field].as_widget())
94
95 result.append('</td>\n')
96 result.append('</tr>\n')
97
98 result = []
99 try:
100 first_form = formset.forms[0]
101 # Get the fields of the form, but filter our the 'id' field,
102 # since we don't want to print a table column for it.
103 fields = [field for field in first_form.fields if field != 'id']
104
105 result.append('<thead>\n')
106 add_header(result, fields, first_form)
107 result.append('</thead>\n')
108 result.append('<tfoot>\n')
109 add_footer(result, fields, first_form)
110 result.append('</tfoot>\n')
111
112 result.append('<tbody>\n')
113 for i, form in enumerate(formset.forms):
114 add_errors(result, fields, form)
115 add_widgets(result, fields, form, link)
116
117 result.append('</tbody>\n')
118 except IndexError:
119 result.append('<tr>\n')
120 result.append('<td>\n')
121 result.append(_('No files in this project.'))
122 result.append('</td>\n')
123 result.append('</tr>\n')
124
125 return u''.join(result)
126
127
128 def process_modelformset(request, model_class, queryset, **kwargs):
129 """With the Django model class `model_class` and the given `queryset`,
130 construct a formset process its submission."""
131
132 # Create a formset class for the model `model_class` (i.e. it will contain
133 # forms whose contents are based on the fields of `model_class`);
134 # parameters for the construction of the forms used in the formset should
135 # be in kwargs.
136 formset_class = modelformset_factory(model_class, **kwargs)
137
138 if queryset is None:
139 queryset = model_class.objects.all()
140
141 # If the request is a POST, we want to possibly update our data
142 if request.method == 'POST' and request.POST:
143 # Create a formset from all the 'model_class' instances whose values
144 # will be updated using the contents of request.POST
145 objects = paginate(request, queryset)
146 formset = formset_class(request.POST, queryset=objects.object_list)
147
148 # Validate all the forms in the formset
149 if formset.is_valid():
150 # If all is well, Django can save all our data for us
151 formset.save()
152 else:
153 # Otherwise, complain to the user that something went wrong
154 return formset, _("There are errors in the form. Please review "
155 "the problems below."), objects
156
157 # Hack to force reevaluation of same query
158 queryset = queryset.filter()
159
160 objects = paginate(request, queryset)
161
162 return formset_class(queryset=objects.object_list), None, objects
163
164
165 def edit(request, template, model_class, ctx=None,
166 link=None, linkfield='code', queryset=None, **kwargs):
167 formset, msg, objects = process_modelformset(request, model_class,
168 queryset=queryset, **kwargs)
169 if ctx is None:
170 ctx = {}
171
172 ctx.update({
173 'formset_text': mark_safe(form_set_as_table(formset, link, linkfield)),
174 'formset': formset,
175 'objects': objects,
176 'error_msg': msg,
177 })
178
179 return render(request, template, ctx)
180
[end of pootle/apps/pootle_app/views/admin/util.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_app/views/admin/util.py b/pootle/apps/pootle_app/views/admin/util.py
--- a/pootle/apps/pootle_app/views/admin/util.py
+++ b/pootle/apps/pootle_app/views/admin/util.py
@@ -7,6 +7,7 @@
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
+from django import forms
from django.forms.models import modelformset_factory
from django.forms.utils import ErrorList
from django.shortcuts import render
@@ -110,6 +111,10 @@
result.append('</tfoot>\n')
result.append('<tbody>\n')
+
+ # Do not display the delete checkbox for the 'add a new entry' form.
+ formset.forms[-1].fields['DELETE'].widget = forms.HiddenInput()
+
for i, form in enumerate(formset.forms):
add_errors(result, fields, form)
add_widgets(result, fields, form, link)
| {"golden_diff": "diff --git a/pootle/apps/pootle_app/views/admin/util.py b/pootle/apps/pootle_app/views/admin/util.py\n--- a/pootle/apps/pootle_app/views/admin/util.py\n+++ b/pootle/apps/pootle_app/views/admin/util.py\n@@ -7,6 +7,7 @@\n # or later license. See the LICENSE file for a copy of the license and the\n # AUTHORS file for copyright and authorship information.\n \n+from django import forms\n from django.forms.models import modelformset_factory\n from django.forms.utils import ErrorList\n from django.shortcuts import render\n@@ -110,6 +111,10 @@\n result.append('</tfoot>\\n')\n \n result.append('<tbody>\\n')\n+\n+ # Do not display the delete checkbox for the 'add a new entry' form.\n+ formset.forms[-1].fields['DELETE'].widget = forms.HiddenInput()\n+\n for i, form in enumerate(formset.forms):\n add_errors(result, fields, form)\n add_widgets(result, fields, form, link)\n", "issue": "Check 'delete' checkbox for a new entry renders error\n_Version: 2.5.1_\n\n_Originally posted by Mike Massonnet:_\n\n_Imported from attachment 976:_\n\n``` pytb\nTraceback (most recent call last):\n File \"/usr/lib/python2.7/dist-packages/django/core/handlers/base.py\", line 109, in get_response\n response = callback(request, *callback_args, **callback_kwargs)\n File \"/usr/lib/python2.7/dist-packages/pootle/core/decorators.py\", line 87, in wrapped\n return func(request, path_obj, *args, **kwargs)\n File \"/usr/lib/python2.7/dist-packages/pootle/core/decorators.py\", line 180, in _wrapped\n return func(request, *args, **kwargs)\n File \"/usr/lib/python2.7/dist-packages/pootle/apps/pootle_project/views.py\", line 427, in project_admin\n exclude=('description',))\n File \"/usr/lib/python2.7/dist-packages/pootle/apps/pootle_app/views/admin/util.py\", line 191, in edit\n queryset=queryset, **kwargs)\n File \"/usr/lib/python2.7/dist-packages/pootle/apps/pootle_app/views/admin/util.py\", line 172, in process_modelformset\n if formset.is_valid():\n File \"/usr/lib/python2.7/dist-packages/django/forms/formsets.py\", line 272, in is_valid\n err = self.errors\n File \"/usr/lib/python2.7/dist-packages/django/forms/formsets.py\", line 250, in _get_errors\n self.full_clean()\n File \"/usr/lib/python2.7/dist-packages/django/forms/formsets.py\", line 293, in full_clean\n self._errors.append(form.errors)\n File \"/usr/lib/python2.7/dist-packages/django/forms/forms.py\", line 115, in _get_errors\n self.full_clean()\n File \"/usr/lib/python2.7/dist-packages/django/forms/forms.py\", line 272, in full_clean\n self._post_clean()\n File \"/usr/lib/python2.7/dist-packages/django/forms/models.py\", line 309, in _post_clean\n self.instance = construct_instance(self, self.instance, opts.fields, opts.exclude)\n File \"/usr/lib/python2.7/dist-packages/django/forms/models.py\", line 51, in construct_instance\n f.save_form_data(instance, cleaned_data[f.name])\n File \"/usr/lib/python2.7/dist-packages/django/db/models/fields/__init__.py\", line 454, in save_form_data\n setattr(instance, self.name, data)\n File \"/usr/lib/python2.7/dist-packages/django/db/models/fields/related.py\", line 362, in __set__\n (instance._meta.object_name, self.field.name))\nValueError: Cannot assign None: \"TranslationProject.language\" does not allow null values.\n```\n\n_Created [attachment 976](http://bugs.locamotion.org/attachment.cgi?id=976)_\n\npootle-check-all-traceback.txt\n\nHello,\n\nIn the adminstration page of a project, checking the Delete checkbox for a new entry renders an error. 
This checkbox is also automatically selected when checking the Delete checkbox at the top of the form, to check all checkboxes.\n\nThe following error is rendered:\n\"Cannot assign None: \"TranslationProject.language\" does not allow null values.\"\n\nSee attached screenshots and traceback.\n\nRegards,\nMike\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.forms.models import modelformset_factory\nfrom django.forms.utils import ErrorList\nfrom django.shortcuts import render\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext as _\n\nfrom pootle.core.paginator import paginate\n\n\ndef form_set_as_table(formset, link=None, linkfield='code'):\n \"\"\"Create an HTML table from the formset. The first form in the\n formset is used to obtain a list of the fields that need to be\n displayed.\n\n Errors, if there are any, appear in the row above the form which\n triggered any errors.\n\n If the forms are based on database models, the order of the\n columns is determined by the order of the fields in the model\n specification.\n \"\"\"\n\n def add_header(result, fields, form):\n result.append('<tr>\\n')\n for field in fields:\n widget = form.fields[field].widget\n widget_name = widget.__class__.__name__\n\n if widget.is_hidden or \\\n widget_name in ('CheckboxInput', 'SelectMultiple'):\n result.append('<th class=\"sorttable_nosort\">')\n else:\n result.append('<th>')\n\n if widget_name in ('CheckboxInput',):\n result.append(form[field].as_widget())\n result.append(form[field].label_tag())\n elif form.fields[field].label is not None and not widget.is_hidden:\n result.append(unicode(form.fields[field].label))\n\n result.append('</th>\\n')\n result.append('</tr>\\n')\n\n def add_footer(result, fields, form):\n result.append('<tr>\\n')\n for field in fields:\n field_obj = form.fields[field]\n result.append('<td>')\n\n if field_obj.label is not None and not field_obj.widget.is_hidden:\n result.append(unicode(field_obj.label))\n\n result.append('</td>\\n')\n result.append('</tr>\\n')\n\n def add_errors(result, fields, form):\n # If the form has errors, then we'll add a table row with the\n # errors.\n if len(form.errors) > 0:\n result.append('<tr>\\n')\n for field in fields:\n result.append('<td>')\n result.append(form.errors.get(field, ErrorList()).as_ul())\n result.append('</td>\\n')\n\n result.append('</tr>\\n')\n\n def add_widgets(result, fields, form, link):\n result.append('<tr class=\"item\">\\n')\n for i, field in enumerate(fields):\n result.append('<td class=\"%s\">' % field)\n # Include a hidden element containing the form's id to the\n # first column.\n if i == 0:\n result.append(form['id'].as_hidden())\n\n # `link` indicates whether we put the first field as a link or as\n # widget\n if field == linkfield and linkfield in form.initial and link:\n if callable(link):\n result.append(link(form.instance))\n result.append(form[field].as_hidden())\n else:\n result.append(form[field].as_widget())\n\n result.append('</td>\\n')\n result.append('</tr>\\n')\n\n result = []\n try:\n first_form = formset.forms[0]\n # Get the fields of the form, but filter our the 'id' field,\n # since we don't want to print a table column for it.\n fields = [field for field in 
first_form.fields if field != 'id']\n\n result.append('<thead>\\n')\n add_header(result, fields, first_form)\n result.append('</thead>\\n')\n result.append('<tfoot>\\n')\n add_footer(result, fields, first_form)\n result.append('</tfoot>\\n')\n\n result.append('<tbody>\\n')\n for i, form in enumerate(formset.forms):\n add_errors(result, fields, form)\n add_widgets(result, fields, form, link)\n\n result.append('</tbody>\\n')\n except IndexError:\n result.append('<tr>\\n')\n result.append('<td>\\n')\n result.append(_('No files in this project.'))\n result.append('</td>\\n')\n result.append('</tr>\\n')\n\n return u''.join(result)\n\n\ndef process_modelformset(request, model_class, queryset, **kwargs):\n \"\"\"With the Django model class `model_class` and the given `queryset`,\n construct a formset process its submission.\"\"\"\n\n # Create a formset class for the model `model_class` (i.e. it will contain\n # forms whose contents are based on the fields of `model_class`);\n # parameters for the construction of the forms used in the formset should\n # be in kwargs.\n formset_class = modelformset_factory(model_class, **kwargs)\n\n if queryset is None:\n queryset = model_class.objects.all()\n\n # If the request is a POST, we want to possibly update our data\n if request.method == 'POST' and request.POST:\n # Create a formset from all the 'model_class' instances whose values\n # will be updated using the contents of request.POST\n objects = paginate(request, queryset)\n formset = formset_class(request.POST, queryset=objects.object_list)\n\n # Validate all the forms in the formset\n if formset.is_valid():\n # If all is well, Django can save all our data for us\n formset.save()\n else:\n # Otherwise, complain to the user that something went wrong\n return formset, _(\"There are errors in the form. Please review \"\n \"the problems below.\"), objects\n\n # Hack to force reevaluation of same query\n queryset = queryset.filter()\n\n objects = paginate(request, queryset)\n\n return formset_class(queryset=objects.object_list), None, objects\n\n\ndef edit(request, template, model_class, ctx=None,\n link=None, linkfield='code', queryset=None, **kwargs):\n formset, msg, objects = process_modelformset(request, model_class,\n queryset=queryset, **kwargs)\n if ctx is None:\n ctx = {}\n\n ctx.update({\n 'formset_text': mark_safe(form_set_as_table(formset, link, linkfield)),\n 'formset': formset,\n 'objects': objects,\n 'error_msg': msg,\n })\n\n return render(request, template, ctx)\n", "path": "pootle/apps/pootle_app/views/admin/util.py"}]} | 3,197 | 232 |
gh_patches_debug_17805 | rasdani/github-patches | git_diff | scrapy__scrapy-3082 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Request serialization should fail for non-picklable objects
The Pickle-based disk queues silently serialize requests that shouldn't be serialized in Python<=3.5. I found this problem when dumping a request with an `ItemLoader` object in its `meta` dict. Python 3.6 fails in [this line](https://github.com/scrapy/scrapy/blob/1.4/scrapy/squeues.py#L27) with `TypeError: can't pickle HtmlElement objects`, because the loader contains a `Selector`, which in turns contains an `HtmlElement` object.
I tested this using the https://github.com/scrapinghub/scrapinghub-stack-scrapy repository, and found that `pickle.loads(pickle.dumps(selector))` doesn't fail, but generates a broken object.
#### Python 2.7, Scrapy 1.3.3 (https://github.com/scrapinghub/scrapinghub-stack-scrapy/tree/branch-1.3)
```
root@04bfc6cf84cd:/# scrapy version -v
Scrapy : 1.3.3
lxml : 3.7.2.0
libxml2 : 2.9.3
cssselect : 1.0.1
parsel : 1.1.0
w3lib : 1.17.0
Twisted : 16.6.0
Python : 2.7.14 (default, Dec 12 2017, 16:55:09) - [GCC 4.9.2]
pyOpenSSL : 16.2.0 (OpenSSL 1.0.1t 3 May 2016)
Platform : Linux-4.9.44-linuxkit-aufs-x86_64-with-debian-8.10
root@04bfc6cf84cd:/# scrapy shell "http://example.org"
2017-12-29 16:49:27 [scrapy.utils.log] INFO: Scrapy 1.3.3 started (bot: scrapybot)
(...)
>>> from six.moves import cPickle as pickle
>>> s2 = pickle.loads(pickle.dumps(response.selector, protocol=2))
>>> response.selector.css('a')
[<Selector xpath=u'descendant-or-self::a' data=u'<a href="http://www.iana.org/domains/exa'>]
>>> s2.css('a')
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/usr/local/lib/python2.7/site-packages/parsel/selector.py", line 227, in css
return self.xpath(self._css2xpath(query))
File "/usr/local/lib/python2.7/site-packages/parsel/selector.py", line 203, in xpath
**kwargs)
File "src/lxml/lxml.etree.pyx", line 1584, in lxml.etree._Element.xpath (src/lxml/lxml.etree.c:59349)
File "src/lxml/xpath.pxi", line 257, in lxml.etree.XPathElementEvaluator.__init__ (src/lxml/lxml.etree.c:170478)
File "src/lxml/apihelpers.pxi", line 19, in lxml.etree._assertValidNode (src/lxml/lxml.etree.c:16482)
AssertionError: invalid Element proxy at 140144569743064
```
#### Python 3.5, Scrapy 1.3.3 (https://github.com/scrapinghub/scrapinghub-stack-scrapy/tree/branch-1.3-py3)
```
root@1945e2154919:/# scrapy version -v
Scrapy : 1.3.3
lxml : 3.7.2.0
libxml2 : 2.9.3
cssselect : 1.0.1
parsel : 1.1.0
w3lib : 1.17.0
Twisted : 16.6.0
Python : 3.5.4 (default, Dec 12 2017, 16:43:39) - [GCC 4.9.2]
pyOpenSSL : 16.2.0 (OpenSSL 1.0.1t 3 May 2016)
Platform : Linux-4.9.44-linuxkit-aufs-x86_64-with-debian-8.10
root@1945e2154919:/# scrapy shell "http://example.org"
2017-12-29 16:52:37 [scrapy.utils.log] INFO: Scrapy 1.3.3 started (bot: scrapybot)
(...)
>>> from six.moves import cPickle as pickle
>>> s2 = pickle.loads(pickle.dumps(response.selector, protocol=2))
>>> response.selector.css('a')
[<Selector xpath='descendant-or-self::a' data='<a href="http://www.iana.org/domains/exa'>]
>>> s2.css('a')
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/usr/local/lib/python3.5/site-packages/parsel/selector.py", line 227, in css
return self.xpath(self._css2xpath(query))
File "/usr/local/lib/python3.5/site-packages/parsel/selector.py", line 203, in xpath
**kwargs)
File "src/lxml/lxml.etree.pyx", line 1584, in lxml.etree._Element.xpath (src/lxml/lxml.etree.c:59349)
File "src/lxml/xpath.pxi", line 257, in lxml.etree.XPathElementEvaluator.__init__ (src/lxml/lxml.etree.c:170478)
File "src/lxml/apihelpers.pxi", line 19, in lxml.etree._assertValidNode (src/lxml/lxml.etree.c:16482)
AssertionError: invalid Element proxy at 139862544625976
```
#### Python 3.6, Scrapy 1.3.3 (https://github.com/scrapinghub/scrapinghub-stack-scrapy/tree/branch-1.3-py3)
```
root@43e690443ca7:/# scrapy version -v
Scrapy : 1.3.3
lxml : 3.7.2.0
libxml2 : 2.9.3
cssselect : 1.0.1
parsel : 1.1.0
w3lib : 1.17.0
Twisted : 16.6.0
Python : 3.6.4 (default, Dec 21 2017, 01:35:12) - [GCC 4.9.2]
pyOpenSSL : 16.2.0 (OpenSSL 1.0.1t 3 May 2016)
Platform : Linux-4.9.44-linuxkit-aufs-x86_64-with-debian-8.10
root@43e690443ca7:/# scrapy shell "http://example.org"
2017-12-29 16:54:49 [scrapy.utils.log] INFO: Scrapy 1.3.3 started (bot: scrapybot)
(...)
>>> from six.moves import cPickle as pickle
>>> s2 = pickle.loads(pickle.dumps(response.selector, protocol=2))
Traceback (most recent call last):
File "<console>", line 1, in <module>
TypeError: can't pickle HtmlElement objects
```
</issue>
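For illustration only, and not part of the original report: because the exception type raised by `pickle.dumps` for such objects differs across interpreter versions (the comments in `scrapy/squeues.py` below note `pickle.PicklingError` on Python <= 3.4 and `AttributeError` on 3.5, while the traceback above shows `TypeError` on 3.6), a serializer helper that wants to reject non-picklable requests consistently has to catch all three. A minimal sketch of how the `_pickle_serialize` helper shown below could behave:

```python
import pickle

def _pickle_serialize(obj):
    """Sketch: surface version-specific pickling failures as a single ValueError.

    Python <= 3.4 raises pickle.PicklingError, Python 3.5 raises AttributeError
    for objects such as lxml's HtmlElement, and Python >= 3.6 raises TypeError.
    """
    try:
        return pickle.dumps(obj, protocol=2)
    except (pickle.PicklingError, AttributeError, TypeError) as exc:
        # Callers (the disk queues) treat ValueError as "not serializable".
        raise ValueError(str(exc))
```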
<code>
[start of scrapy/squeues.py]
1 """
2 Scheduler queues
3 """
4
5 import marshal
6 from six.moves import cPickle as pickle
7
8 from queuelib import queue
9
10 def _serializable_queue(queue_class, serialize, deserialize):
11
12 class SerializableQueue(queue_class):
13
14 def push(self, obj):
15 s = serialize(obj)
16 super(SerializableQueue, self).push(s)
17
18 def pop(self):
19 s = super(SerializableQueue, self).pop()
20 if s:
21 return deserialize(s)
22
23 return SerializableQueue
24
25 def _pickle_serialize(obj):
26 try:
27 return pickle.dumps(obj, protocol=2)
28 # Python>=3.5 raises AttributeError here while
29 # Python<=3.4 raises pickle.PicklingError
30 except (pickle.PicklingError, AttributeError) as e:
31 raise ValueError(str(e))
32
33 PickleFifoDiskQueue = _serializable_queue(queue.FifoDiskQueue, \
34 _pickle_serialize, pickle.loads)
35 PickleLifoDiskQueue = _serializable_queue(queue.LifoDiskQueue, \
36 _pickle_serialize, pickle.loads)
37 MarshalFifoDiskQueue = _serializable_queue(queue.FifoDiskQueue, \
38 marshal.dumps, marshal.loads)
39 MarshalLifoDiskQueue = _serializable_queue(queue.LifoDiskQueue, \
40 marshal.dumps, marshal.loads)
41 FifoMemoryQueue = queue.FifoMemoryQueue
42 LifoMemoryQueue = queue.LifoMemoryQueue
43
[end of scrapy/squeues.py]
[start of setup.py]
1 from os.path import dirname, join
2 from pkg_resources import parse_version
3 from setuptools import setup, find_packages, __version__ as setuptools_version
4
5
6 with open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:
7 version = f.read().decode('ascii').strip()
8
9
10 def has_environment_marker_platform_impl_support():
11 """Code extracted from 'pytest/setup.py'
12 https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31
13
14 The first known release to support environment marker with range operators
15 it is 18.5, see:
16 https://setuptools.readthedocs.io/en/latest/history.html#id235
17 """
18 return parse_version(setuptools_version) >= parse_version('18.5')
19
20
21 extras_require = {}
22
23 if has_environment_marker_platform_impl_support():
24 extras_require[':platform_python_implementation == "PyPy"'] = [
25 'PyPyDispatcher>=2.1.0',
26 ]
27
28
29 setup(
30 name='Scrapy',
31 version=version,
32 url='https://scrapy.org',
33 description='A high-level Web Crawling and Web Scraping framework',
34 long_description=open('README.rst').read(),
35 author='Scrapy developers',
36 maintainer='Pablo Hoffman',
37 maintainer_email='[email protected]',
38 license='BSD',
39 packages=find_packages(exclude=('tests', 'tests.*')),
40 include_package_data=True,
41 zip_safe=False,
42 entry_points={
43 'console_scripts': ['scrapy = scrapy.cmdline:execute']
44 },
45 classifiers=[
46 'Framework :: Scrapy',
47 'Development Status :: 5 - Production/Stable',
48 'Environment :: Console',
49 'Intended Audience :: Developers',
50 'License :: OSI Approved :: BSD License',
51 'Operating System :: OS Independent',
52 'Programming Language :: Python',
53 'Programming Language :: Python :: 2',
54 'Programming Language :: Python :: 2.7',
55 'Programming Language :: Python :: 3',
56 'Programming Language :: Python :: 3.4',
57 'Programming Language :: Python :: 3.5',
58 'Programming Language :: Python :: 3.6',
59 'Programming Language :: Python :: Implementation :: CPython',
60 'Programming Language :: Python :: Implementation :: PyPy',
61 'Topic :: Internet :: WWW/HTTP',
62 'Topic :: Software Development :: Libraries :: Application Frameworks',
63 'Topic :: Software Development :: Libraries :: Python Modules',
64 ],
65 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
66 install_requires=[
67 'Twisted>=13.1.0',
68 'w3lib>=1.17.0',
69 'queuelib',
70 'lxml',
71 'pyOpenSSL',
72 'cssselect>=0.9',
73 'six>=1.5.2',
74 'parsel>=1.1',
75 'PyDispatcher>=2.0.5',
76 'service_identity',
77 ],
78 extras_require=extras_require,
79 )
80
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/squeues.py b/scrapy/squeues.py
--- a/scrapy/squeues.py
+++ b/scrapy/squeues.py
@@ -25,9 +25,10 @@
def _pickle_serialize(obj):
try:
return pickle.dumps(obj, protocol=2)
- # Python>=3.5 raises AttributeError here while
- # Python<=3.4 raises pickle.PicklingError
- except (pickle.PicklingError, AttributeError) as e:
+ # Python <= 3.4 raises pickle.PicklingError here while
+ # 3.5 <= Python < 3.6 raises AttributeError and
+ # Python >= 3.6 raises TypeError
+ except (pickle.PicklingError, AttributeError, TypeError) as e:
raise ValueError(str(e))
PickleFifoDiskQueue = _serializable_queue(queue.FifoDiskQueue, \
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -71,7 +71,7 @@
'pyOpenSSL',
'cssselect>=0.9',
'six>=1.5.2',
- 'parsel>=1.1',
+ 'parsel>=1.4',
'PyDispatcher>=2.0.5',
'service_identity',
],
| {"golden_diff": "diff --git a/scrapy/squeues.py b/scrapy/squeues.py\n--- a/scrapy/squeues.py\n+++ b/scrapy/squeues.py\n@@ -25,9 +25,10 @@\n def _pickle_serialize(obj):\n try:\n return pickle.dumps(obj, protocol=2)\n- # Python>=3.5 raises AttributeError here while\n- # Python<=3.4 raises pickle.PicklingError\n- except (pickle.PicklingError, AttributeError) as e:\n+ # Python <= 3.4 raises pickle.PicklingError here while\n+ # 3.5 <= Python < 3.6 raises AttributeError and\n+ # Python >= 3.6 raises TypeError\n+ except (pickle.PicklingError, AttributeError, TypeError) as e:\n raise ValueError(str(e))\n \n PickleFifoDiskQueue = _serializable_queue(queue.FifoDiskQueue, \\\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -71,7 +71,7 @@\n 'pyOpenSSL',\n 'cssselect>=0.9',\n 'six>=1.5.2',\n- 'parsel>=1.1',\n+ 'parsel>=1.4',\n 'PyDispatcher>=2.0.5',\n 'service_identity',\n ],\n", "issue": "Request serialization should fail for non-picklable objects\nThe Pickle-based disk queues silently serialize requests that shouldn't be serialized in Python<=3.5. I found this problem when dumping a request with an `ItemLoader` object in its `meta` dict. Python 3.6 fails in [this line](https://github.com/scrapy/scrapy/blob/1.4/scrapy/squeues.py#L27) with `TypeError: can't pickle HtmlElement objects`, because the loader contains a `Selector`, which in turns contains an `HtmlElement` object.\r\n\r\nI tested this using the https://github.com/scrapinghub/scrapinghub-stack-scrapy repository, and found that `pickle.loads(pickle.dumps(selector))` doesn't fail, but generates a broken object.\r\n\r\n#### Python 2.7, Scrapy 1.3.3 (https://github.com/scrapinghub/scrapinghub-stack-scrapy/tree/branch-1.3)\r\n```\r\nroot@04bfc6cf84cd:/# scrapy version -v\r\nScrapy : 1.3.3\r\nlxml : 3.7.2.0\r\nlibxml2 : 2.9.3\r\ncssselect : 1.0.1\r\nparsel : 1.1.0\r\nw3lib : 1.17.0\r\nTwisted : 16.6.0\r\nPython : 2.7.14 (default, Dec 12 2017, 16:55:09) - [GCC 4.9.2]\r\npyOpenSSL : 16.2.0 (OpenSSL 1.0.1t 3 May 2016)\r\nPlatform : Linux-4.9.44-linuxkit-aufs-x86_64-with-debian-8.10\r\nroot@04bfc6cf84cd:/# scrapy shell \"http://example.org\"\r\n2017-12-29 16:49:27 [scrapy.utils.log] INFO: Scrapy 1.3.3 started (bot: scrapybot)\r\n(...)\r\n>>> from six.moves import cPickle as pickle\r\n>>> s2 = pickle.loads(pickle.dumps(response.selector, protocol=2))\r\n>>> response.selector.css('a')\r\n[<Selector xpath=u'descendant-or-self::a' data=u'<a href=\"http://www.iana.org/domains/exa'>]\r\n>>> s2.css('a')\r\nTraceback (most recent call last):\r\n File \"<console>\", line 1, in <module>\r\n File \"/usr/local/lib/python2.7/site-packages/parsel/selector.py\", line 227, in css\r\n return self.xpath(self._css2xpath(query))\r\n File \"/usr/local/lib/python2.7/site-packages/parsel/selector.py\", line 203, in xpath\r\n **kwargs)\r\n File \"src/lxml/lxml.etree.pyx\", line 1584, in lxml.etree._Element.xpath (src/lxml/lxml.etree.c:59349)\r\n File \"src/lxml/xpath.pxi\", line 257, in lxml.etree.XPathElementEvaluator.__init__ (src/lxml/lxml.etree.c:170478)\r\n File \"src/lxml/apihelpers.pxi\", line 19, in lxml.etree._assertValidNode (src/lxml/lxml.etree.c:16482)\r\nAssertionError: invalid Element proxy at 140144569743064\r\n```\r\n\r\n\r\n#### Python 3.5, Scrapy 1.3.3 (https://github.com/scrapinghub/scrapinghub-stack-scrapy/tree/branch-1.3-py3)\r\n```\r\nroot@1945e2154919:/# scrapy version -v\r\nScrapy : 1.3.3\r\nlxml : 3.7.2.0\r\nlibxml2 : 2.9.3\r\ncssselect : 1.0.1\r\nparsel : 1.1.0\r\nw3lib : 1.17.0\r\nTwisted : 16.6.0\r\nPython : 3.5.4 (default, 
Dec 12 2017, 16:43:39) - [GCC 4.9.2]\r\npyOpenSSL : 16.2.0 (OpenSSL 1.0.1t 3 May 2016)\r\nPlatform : Linux-4.9.44-linuxkit-aufs-x86_64-with-debian-8.10\r\nroot@1945e2154919:/# scrapy shell \"http://example.org\"\r\n2017-12-29 16:52:37 [scrapy.utils.log] INFO: Scrapy 1.3.3 started (bot: scrapybot)\r\n(...)\r\n>>> from six.moves import cPickle as pickle\r\n>>> s2 = pickle.loads(pickle.dumps(response.selector, protocol=2))\r\n>>> response.selector.css('a')\r\n[<Selector xpath='descendant-or-self::a' data='<a href=\"http://www.iana.org/domains/exa'>]\r\n>>> s2.css('a')\r\nTraceback (most recent call last):\r\n File \"<console>\", line 1, in <module>\r\n File \"/usr/local/lib/python3.5/site-packages/parsel/selector.py\", line 227, in css\r\n return self.xpath(self._css2xpath(query))\r\n File \"/usr/local/lib/python3.5/site-packages/parsel/selector.py\", line 203, in xpath\r\n **kwargs)\r\n File \"src/lxml/lxml.etree.pyx\", line 1584, in lxml.etree._Element.xpath (src/lxml/lxml.etree.c:59349)\r\n File \"src/lxml/xpath.pxi\", line 257, in lxml.etree.XPathElementEvaluator.__init__ (src/lxml/lxml.etree.c:170478)\r\n File \"src/lxml/apihelpers.pxi\", line 19, in lxml.etree._assertValidNode (src/lxml/lxml.etree.c:16482)\r\nAssertionError: invalid Element proxy at 139862544625976\r\n```\r\n\r\n\r\n#### Python 3.6, Scrapy 1.3.3 (https://github.com/scrapinghub/scrapinghub-stack-scrapy/tree/branch-1.3-py3)\r\n```\r\nroot@43e690443ca7:/# scrapy version -v\r\nScrapy : 1.3.3\r\nlxml : 3.7.2.0\r\nlibxml2 : 2.9.3\r\ncssselect : 1.0.1\r\nparsel : 1.1.0\r\nw3lib : 1.17.0\r\nTwisted : 16.6.0\r\nPython : 3.6.4 (default, Dec 21 2017, 01:35:12) - [GCC 4.9.2]\r\npyOpenSSL : 16.2.0 (OpenSSL 1.0.1t 3 May 2016)\r\nPlatform : Linux-4.9.44-linuxkit-aufs-x86_64-with-debian-8.10\r\nroot@43e690443ca7:/# scrapy shell \"http://example.org\"\r\n2017-12-29 16:54:49 [scrapy.utils.log] INFO: Scrapy 1.3.3 started (bot: scrapybot)\r\n(...)\r\n>>> from six.moves import cPickle as pickle\r\n>>> s2 = pickle.loads(pickle.dumps(response.selector, protocol=2))\r\nTraceback (most recent call last):\r\n File \"<console>\", line 1, in <module>\r\nTypeError: can't pickle HtmlElement objects\r\n```\n", "before_files": [{"content": "\"\"\"\nScheduler queues\n\"\"\"\n\nimport marshal\nfrom six.moves import cPickle as pickle\n\nfrom queuelib import queue\n\ndef _serializable_queue(queue_class, serialize, deserialize):\n\n class SerializableQueue(queue_class):\n\n def push(self, obj):\n s = serialize(obj)\n super(SerializableQueue, self).push(s)\n\n def pop(self):\n s = super(SerializableQueue, self).pop()\n if s:\n return deserialize(s)\n\n return SerializableQueue\n\ndef _pickle_serialize(obj):\n try:\n return pickle.dumps(obj, protocol=2)\n # Python>=3.5 raises AttributeError here while\n # Python<=3.4 raises pickle.PicklingError\n except (pickle.PicklingError, AttributeError) as e:\n raise ValueError(str(e))\n\nPickleFifoDiskQueue = _serializable_queue(queue.FifoDiskQueue, \\\n _pickle_serialize, pickle.loads)\nPickleLifoDiskQueue = _serializable_queue(queue.LifoDiskQueue, \\\n _pickle_serialize, pickle.loads)\nMarshalFifoDiskQueue = _serializable_queue(queue.FifoDiskQueue, \\\n marshal.dumps, marshal.loads)\nMarshalLifoDiskQueue = _serializable_queue(queue.LifoDiskQueue, \\\n marshal.dumps, marshal.loads)\nFifoMemoryQueue = queue.FifoMemoryQueue\nLifoMemoryQueue = queue.LifoMemoryQueue\n", "path": "scrapy/squeues.py"}, {"content": "from os.path import dirname, join\nfrom pkg_resources import parse_version\nfrom setuptools import setup, 
find_packages, __version__ as setuptools_version\n\n\nwith open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:\n version = f.read().decode('ascii').strip()\n\n\ndef has_environment_marker_platform_impl_support():\n \"\"\"Code extracted from 'pytest/setup.py'\n https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31\n\n The first known release to support environment marker with range operators\n it is 18.5, see:\n https://setuptools.readthedocs.io/en/latest/history.html#id235\n \"\"\"\n return parse_version(setuptools_version) >= parse_version('18.5')\n\n\nextras_require = {}\n\nif has_environment_marker_platform_impl_support():\n extras_require[':platform_python_implementation == \"PyPy\"'] = [\n 'PyPyDispatcher>=2.1.0',\n ]\n\n\nsetup(\n name='Scrapy',\n version=version,\n url='https://scrapy.org',\n description='A high-level Web Crawling and Web Scraping framework',\n long_description=open('README.rst').read(),\n author='Scrapy developers',\n maintainer='Pablo Hoffman',\n maintainer_email='[email protected]',\n license='BSD',\n packages=find_packages(exclude=('tests', 'tests.*')),\n include_package_data=True,\n zip_safe=False,\n entry_points={\n 'console_scripts': ['scrapy = scrapy.cmdline:execute']\n },\n classifiers=[\n 'Framework :: Scrapy',\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n install_requires=[\n 'Twisted>=13.1.0',\n 'w3lib>=1.17.0',\n 'queuelib',\n 'lxml',\n 'pyOpenSSL',\n 'cssselect>=0.9',\n 'six>=1.5.2',\n 'parsel>=1.1',\n 'PyDispatcher>=2.0.5',\n 'service_identity',\n ],\n extras_require=extras_require,\n)\n", "path": "setup.py"}]} | 3,535 | 292 |
gh_patches_debug_27302 | rasdani/github-patches | git_diff | microsoft__ptvsd-913 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use SIO_LOOPBACK_FAST_PATH
Python 3.6 adds the SIO_LOOPBACK_FAST_PATH flag for sockets, which dramatically increases the bandwidth for loopback connections (I got >4x on my machine with the test code on the linked issue).
We should enable this flag where possible in our debugger. The functionality has existed since Windows 8, but using it on earlier OSs may cause an exception. It is safe to use if the socket eventually gets a connection from another IP.
Moved from https://github.com/Microsoft/PTVS/issues/1350
</issue>
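For illustration, a minimal sketch of enabling the flag defensively (the helper name is made up for the example): on CPython 3.6+ for Windows the flag is exposed as `socket.SIO_LOOPBACK_FAST_PATH` and applied through `socket.ioctl`, older Pythons simply do not expose it, and pre-Windows-8 systems reject the ioctl with `WSAEOPNOTSUPP` (winerror 10045).

```python
import socket

def _enable_loopback_fastpath(sock):
    """Best-effort sketch: turn on the TCP loopback fast path where supported."""
    try:
        # socket.SIO_LOOPBACK_FAST_PATH is only exposed on Python >= 3.6 (Windows);
        # socket.ioctl itself is Windows-only, so this also no-ops on other platforms.
        sock.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
    except AttributeError:
        pass  # Python 2.x / < 3.6 or non-Windows: nothing to do.
    except OSError as exc:
        # Windows 7 and earlier reject the ioctl with WSAEOPNOTSUPP (10045).
        if getattr(exc, "winerror", None) != 10045:
            raise
```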
<code>
[start of ptvsd/socket.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License. See LICENSE in the project root
3 # for license information.
4
5 from __future__ import absolute_import
6
7 from collections import namedtuple
8 import contextlib
9 import errno
10 import platform
11 import socket
12 try:
13 from urllib.parse import urlparse
14 except ImportError:
15 from urlparse import urlparse
16
17
18 try:
19 ConnectionError # noqa
20 BrokenPipeError # noqa
21 ConnectionResetError # noqa
22 except NameError:
23 class BrokenPipeError(Exception):
24 # EPIPE and ESHUTDOWN
25 pass
26
27 class ConnectionResetError(Exception):
28 # ECONNRESET
29 pass
30
31
32 NOT_CONNECTED = (
33 errno.ENOTCONN,
34 errno.EBADF,
35 )
36
37 CLOSED = (
38 errno.EPIPE,
39 errno.ESHUTDOWN,
40 errno.ECONNRESET,
41 # Windows
42 10038, # "An operation was attempted on something that is not a socket"
43 10058,
44 )
45
46 EOF = NOT_CONNECTED + CLOSED
47
48
49 @contextlib.contextmanager
50 def convert_eof():
51 """A context manager to convert some socket errors into EOFError."""
52 try:
53 yield
54 except ConnectionResetError:
55 raise EOFError
56 except BrokenPipeError:
57 raise EOFError
58 except OSError as exc:
59 if exc.errno in EOF:
60 raise EOFError
61 raise
62
63
64 class TimeoutError(socket.timeout):
65 """A socket timeout happened."""
66
67
68 def is_socket(sock):
69 """Return True if the object can be used as a socket."""
70 return isinstance(sock, socket.socket)
71
72
73 def create_server(host, port, timeout=None):
74 """Return a local server socket listening on the given port."""
75 if host is None:
76 host = 'localhost'
77 server = _new_sock()
78 server.bind((host, port))
79
80 # _, listener_port = server.getsockname()
81 # print('Listening on', listener_port)
82
83 if timeout is not None:
84 server.settimeout(timeout)
85 server.listen(1)
86 return server
87
88
89 def create_client():
90 """Return a client socket that may be connected to a remote address."""
91 return _new_sock()
92
93
94 def _new_sock():
95 sock = socket.socket(socket.AF_INET,
96 socket.SOCK_STREAM,
97 socket.IPPROTO_TCP)
98 if platform.system() == 'Windows':
99 sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
100 else:
101 sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
102 return sock
103
104
105 @contextlib.contextmanager
106 def ignored_errno(*ignored):
107 """A context manager that ignores the given errnos."""
108 try:
109 yield
110 except OSError as exc:
111 if exc.errno not in ignored:
112 raise
113
114
115 class KeepAlive(namedtuple('KeepAlive', 'interval idle maxfails')):
116 """TCP keep-alive settings."""
117
118 INTERVAL = 3 # seconds
119 IDLE = 1 # seconds after idle
120 MAX_FAILS = 5
121
122 @classmethod
123 def from_raw(cls, raw):
124 """Return the corresponding KeepAlive."""
125 if raw is None:
126 return None
127 elif isinstance(raw, cls):
128 return raw
129 elif isinstance(raw, (str, int, float)):
130 return cls(raw)
131 else:
132 try:
133 raw = dict(raw)
134 except TypeError:
135 return cls(*raw)
136 else:
137 return cls(**raw)
138
139 def __new__(cls, interval=None, idle=None, maxfails=None):
140 self = super(KeepAlive, cls).__new__(
141 cls,
142 float(interval) if interval or interval == 0 else cls.INTERVAL,
143 float(idle) if idle or idle == 0 else cls.IDLE,
144 float(maxfails) if maxfails or maxfails == 0 else cls.MAX_FAILS,
145 )
146 return self
147
148 def apply(self, sock):
149 """Set the keepalive values on the socket."""
150 sock.setsockopt(socket.SOL_SOCKET,
151 socket.SO_KEEPALIVE,
152 1)
153 interval = self.interval
154 idle = self.idle
155 maxfails = self.maxfails
156 try:
157 if interval > 0:
158 sock.setsockopt(socket.IPPROTO_TCP,
159 socket.TCP_KEEPINTVL,
160 interval)
161 if idle > 0:
162 sock.setsockopt(socket.IPPROTO_TCP,
163 socket.TCP_KEEPIDLE,
164 idle)
165 if maxfails >= 0:
166 sock.setsockopt(socket.IPPROTO_TCP,
167 socket.TCP_KEEPCNT,
168 maxfails)
169 except AttributeError:
170 # mostly linux-only
171 pass
172
173
174 def connect(sock, addr, keepalive=None):
175 """Return the client socket for the next connection."""
176 if addr is None:
177 if keepalive is None or keepalive is True:
178 keepalive = KeepAlive()
179 elif keepalive:
180 keepalive = KeepAlive.from_raw(keepalive)
181 client, _ = sock.accept()
182 if keepalive:
183 keepalive.apply(client)
184 return client
185 else:
186 if keepalive:
187 raise NotImplementedError
188 sock.connect(addr)
189 return sock
190
191
192 def shut_down(sock, how=socket.SHUT_RDWR, ignored=NOT_CONNECTED):
193 """Shut down the given socket."""
194 with ignored_errno(*ignored or ()):
195 sock.shutdown(how)
196
197
198 def close_socket(sock):
199 """Shutdown and close the socket."""
200 try:
201 shut_down(sock)
202 except Exception:
203 # TODO: Log errors?
204 pass
205 sock.close()
206
207
208 class Address(namedtuple('Address', 'host port')):
209 """An IP address to use for sockets."""
210
211 @classmethod
212 def from_raw(cls, raw, defaultport=None):
213 """Return an address corresponding to the given data."""
214 if isinstance(raw, cls):
215 return raw
216 elif isinstance(raw, int):
217 return cls(None, raw)
218 elif isinstance(raw, str):
219 if raw == '':
220 return cls('', defaultport)
221 parsed = urlparse(raw)
222 if not parsed.netloc:
223 if parsed.scheme:
224 raise ValueError('invalid address {!r}'.format(raw))
225 return cls.from_raw('x://' + raw, defaultport=defaultport)
226 return cls(
227 parsed.hostname or '',
228 parsed.port if parsed.port else defaultport,
229 )
230 elif not raw:
231 return cls(None, defaultport)
232 else:
233 try:
234 kwargs = dict(**raw)
235 except TypeError:
236 return cls(*raw)
237 else:
238 kwargs.setdefault('host', None)
239 kwargs.setdefault('port', defaultport)
240 return cls(**kwargs)
241
242 @classmethod
243 def as_server(cls, host, port):
244 """Return an address to use as a server address."""
245 return cls(host, port, isserver=True)
246
247 @classmethod
248 def as_client(cls, host, port):
249 """Return an address to use as a server address."""
250 return cls(host, port, isserver=False)
251
252 def __new__(cls, host, port, **kwargs):
253 if host == '*':
254 host = ''
255 isserver = kwargs.pop('isserver', None)
256 if isserver is None:
257 isserver = (host is None or host == '')
258 else:
259 isserver = bool(isserver)
260 if host is None:
261 host = 'localhost'
262 self = super(Address, cls).__new__(
263 cls,
264 str(host),
265 int(port) if port is not None else None,
266 **kwargs
267 )
268 self._isserver = isserver
269 return self
270
271 def __init__(self, *args, **kwargs):
272 if self.port is None:
273 raise TypeError('missing port')
274 if self.port < 0 or self.port > 65535:
275 raise ValueError('port must be non-negative int < 65535')
276
277 def __repr__(self):
278 orig = super(Address, self).__repr__()
279 return '{}, isserver={})'.format(orig[:-1], self._isserver)
280
281 def __eq__(self, other):
282 if not super(Address, self).__eq__(other):
283 return False
284 try:
285 other = self.from_raw(other)
286 except Exception:
287 return False
288 return self._isserver == other._isserver
289
290 @property
291 def isserver(self):
292 return self._isserver
293
[end of ptvsd/socket.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ptvsd/socket.py b/ptvsd/socket.py
--- a/ptvsd/socket.py
+++ b/ptvsd/socket.py
@@ -75,14 +75,14 @@
if host is None:
host = 'localhost'
server = _new_sock()
- server.bind((host, port))
-
- # _, listener_port = server.getsockname()
- # print('Listening on', listener_port)
-
- if timeout is not None:
- server.settimeout(timeout)
- server.listen(1)
+ try:
+ server.bind((host, port))
+ if timeout is not None:
+ server.settimeout(timeout)
+ server.listen(1)
+ except Exception:
+ server.close()
+ raise
return server
@@ -96,6 +96,15 @@
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
if platform.system() == 'Windows':
+ try:
+ sock.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
+ except AttributeError:
+ pass # Not supported in python 2.* or <3.6
+ except OSError as ose:
+ if ose.winerror == 10045: # Not supported by OS
+ pass
+ else:
+ raise
sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
else:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
@@ -200,7 +209,6 @@
try:
shut_down(sock)
except Exception:
- # TODO: Log errors?
pass
sock.close()
| {"golden_diff": "diff --git a/ptvsd/socket.py b/ptvsd/socket.py\n--- a/ptvsd/socket.py\n+++ b/ptvsd/socket.py\n@@ -75,14 +75,14 @@\n if host is None:\n host = 'localhost'\n server = _new_sock()\n- server.bind((host, port))\n-\n- # _, listener_port = server.getsockname()\n- # print('Listening on', listener_port)\n-\n- if timeout is not None:\n- server.settimeout(timeout)\n- server.listen(1)\n+ try:\n+ server.bind((host, port))\n+ if timeout is not None:\n+ server.settimeout(timeout)\n+ server.listen(1)\n+ except Exception:\n+ server.close()\n+ raise\n return server\n \n \n@@ -96,6 +96,15 @@\n socket.SOCK_STREAM,\n socket.IPPROTO_TCP)\n if platform.system() == 'Windows':\n+ try:\n+ sock.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)\n+ except AttributeError:\n+ pass # Not supported in python 2.* or <3.6\n+ except OSError as ose:\n+ if ose.winerror == 10045: # Not supported by OS\n+ pass\n+ else:\n+ raise\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)\n else:\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n@@ -200,7 +209,6 @@\n try:\n shut_down(sock)\n except Exception:\n- # TODO: Log errors?\n pass\n sock.close()\n", "issue": "Use SIO_LOOPBACK_FAST_PATH\nPython 3.6 adds the SIO_LOOPBACK_FAST_PATH flag for sockets, which dramatically increases the bandwidth for loopback connections (I got >4x on my machine with the test code on the linked issue).\r\n\r\nWe should enable this flag where possible in our debugger. The functionality has existed since Windows 8, but using it on earlier OSs may cause an exception. It is safe to use if the socket eventually gets a connection from another IP.\r\n\r\nMoved from https://github.com/Microsoft/PTVS/issues/1350\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See LICENSE in the project root\n# for license information.\n\nfrom __future__ import absolute_import\n\nfrom collections import namedtuple\nimport contextlib\nimport errno\nimport platform\nimport socket\ntry:\n from urllib.parse import urlparse\nexcept ImportError:\n from urlparse import urlparse\n\n\ntry:\n ConnectionError # noqa\n BrokenPipeError # noqa\n ConnectionResetError # noqa\nexcept NameError:\n class BrokenPipeError(Exception):\n # EPIPE and ESHUTDOWN\n pass\n\n class ConnectionResetError(Exception):\n # ECONNRESET\n pass\n\n\nNOT_CONNECTED = (\n errno.ENOTCONN,\n errno.EBADF,\n)\n\nCLOSED = (\n errno.EPIPE,\n errno.ESHUTDOWN,\n errno.ECONNRESET,\n # Windows\n 10038, # \"An operation was attempted on something that is not a socket\"\n 10058,\n)\n\nEOF = NOT_CONNECTED + CLOSED\n\n\[email protected]\ndef convert_eof():\n \"\"\"A context manager to convert some socket errors into EOFError.\"\"\"\n try:\n yield\n except ConnectionResetError:\n raise EOFError\n except BrokenPipeError:\n raise EOFError\n except OSError as exc:\n if exc.errno in EOF:\n raise EOFError\n raise\n\n\nclass TimeoutError(socket.timeout):\n \"\"\"A socket timeout happened.\"\"\"\n\n\ndef is_socket(sock):\n \"\"\"Return True if the object can be used as a socket.\"\"\"\n return isinstance(sock, socket.socket)\n\n\ndef create_server(host, port, timeout=None):\n \"\"\"Return a local server socket listening on the given port.\"\"\"\n if host is None:\n host = 'localhost'\n server = _new_sock()\n server.bind((host, port))\n\n # _, listener_port = server.getsockname()\n # print('Listening on', listener_port)\n\n if timeout is not None:\n server.settimeout(timeout)\n server.listen(1)\n return server\n\n\ndef create_client():\n \"\"\"Return a client socket that may be connected to a remote address.\"\"\"\n return _new_sock()\n\n\ndef _new_sock():\n sock = socket.socket(socket.AF_INET,\n socket.SOCK_STREAM,\n socket.IPPROTO_TCP)\n if platform.system() == 'Windows':\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)\n else:\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n return sock\n\n\[email protected]\ndef ignored_errno(*ignored):\n \"\"\"A context manager that ignores the given errnos.\"\"\"\n try:\n yield\n except OSError as exc:\n if exc.errno not in ignored:\n raise\n\n\nclass KeepAlive(namedtuple('KeepAlive', 'interval idle maxfails')):\n \"\"\"TCP keep-alive settings.\"\"\"\n\n INTERVAL = 3 # seconds\n IDLE = 1 # seconds after idle\n MAX_FAILS = 5\n\n @classmethod\n def from_raw(cls, raw):\n \"\"\"Return the corresponding KeepAlive.\"\"\"\n if raw is None:\n return None\n elif isinstance(raw, cls):\n return raw\n elif isinstance(raw, (str, int, float)):\n return cls(raw)\n else:\n try:\n raw = dict(raw)\n except TypeError:\n return cls(*raw)\n else:\n return cls(**raw)\n\n def __new__(cls, interval=None, idle=None, maxfails=None):\n self = super(KeepAlive, cls).__new__(\n cls,\n float(interval) if interval or interval == 0 else cls.INTERVAL,\n float(idle) if idle or idle == 0 else cls.IDLE,\n float(maxfails) if maxfails or maxfails == 0 else cls.MAX_FAILS,\n )\n return self\n\n def apply(self, sock):\n \"\"\"Set the keepalive values on the socket.\"\"\"\n sock.setsockopt(socket.SOL_SOCKET,\n socket.SO_KEEPALIVE,\n 1)\n interval = self.interval\n idle = self.idle\n maxfails = self.maxfails\n try:\n if interval > 0:\n sock.setsockopt(socket.IPPROTO_TCP,\n socket.TCP_KEEPINTVL,\n interval)\n if idle > 0:\n sock.setsockopt(socket.IPPROTO_TCP,\n socket.TCP_KEEPIDLE,\n idle)\n if 
maxfails >= 0:\n sock.setsockopt(socket.IPPROTO_TCP,\n socket.TCP_KEEPCNT,\n maxfails)\n except AttributeError:\n # mostly linux-only\n pass\n\n\ndef connect(sock, addr, keepalive=None):\n \"\"\"Return the client socket for the next connection.\"\"\"\n if addr is None:\n if keepalive is None or keepalive is True:\n keepalive = KeepAlive()\n elif keepalive:\n keepalive = KeepAlive.from_raw(keepalive)\n client, _ = sock.accept()\n if keepalive:\n keepalive.apply(client)\n return client\n else:\n if keepalive:\n raise NotImplementedError\n sock.connect(addr)\n return sock\n\n\ndef shut_down(sock, how=socket.SHUT_RDWR, ignored=NOT_CONNECTED):\n \"\"\"Shut down the given socket.\"\"\"\n with ignored_errno(*ignored or ()):\n sock.shutdown(how)\n\n\ndef close_socket(sock):\n \"\"\"Shutdown and close the socket.\"\"\"\n try:\n shut_down(sock)\n except Exception:\n # TODO: Log errors?\n pass\n sock.close()\n\n\nclass Address(namedtuple('Address', 'host port')):\n \"\"\"An IP address to use for sockets.\"\"\"\n\n @classmethod\n def from_raw(cls, raw, defaultport=None):\n \"\"\"Return an address corresponding to the given data.\"\"\"\n if isinstance(raw, cls):\n return raw\n elif isinstance(raw, int):\n return cls(None, raw)\n elif isinstance(raw, str):\n if raw == '':\n return cls('', defaultport)\n parsed = urlparse(raw)\n if not parsed.netloc:\n if parsed.scheme:\n raise ValueError('invalid address {!r}'.format(raw))\n return cls.from_raw('x://' + raw, defaultport=defaultport)\n return cls(\n parsed.hostname or '',\n parsed.port if parsed.port else defaultport,\n )\n elif not raw:\n return cls(None, defaultport)\n else:\n try:\n kwargs = dict(**raw)\n except TypeError:\n return cls(*raw)\n else:\n kwargs.setdefault('host', None)\n kwargs.setdefault('port', defaultport)\n return cls(**kwargs)\n\n @classmethod\n def as_server(cls, host, port):\n \"\"\"Return an address to use as a server address.\"\"\"\n return cls(host, port, isserver=True)\n\n @classmethod\n def as_client(cls, host, port):\n \"\"\"Return an address to use as a server address.\"\"\"\n return cls(host, port, isserver=False)\n\n def __new__(cls, host, port, **kwargs):\n if host == '*':\n host = ''\n isserver = kwargs.pop('isserver', None)\n if isserver is None:\n isserver = (host is None or host == '')\n else:\n isserver = bool(isserver)\n if host is None:\n host = 'localhost'\n self = super(Address, cls).__new__(\n cls,\n str(host),\n int(port) if port is not None else None,\n **kwargs\n )\n self._isserver = isserver\n return self\n\n def __init__(self, *args, **kwargs):\n if self.port is None:\n raise TypeError('missing port')\n if self.port < 0 or self.port > 65535:\n raise ValueError('port must be non-negative int < 65535')\n\n def __repr__(self):\n orig = super(Address, self).__repr__()\n return '{}, isserver={})'.format(orig[:-1], self._isserver)\n\n def __eq__(self, other):\n if not super(Address, self).__eq__(other):\n return False\n try:\n other = self.from_raw(other)\n except Exception:\n return False\n return self._isserver == other._isserver\n\n @property\n def isserver(self):\n return self._isserver\n", "path": "ptvsd/socket.py"}]} | 3,225 | 377 |
gh_patches_debug_20848 | rasdani/github-patches | git_diff | openfun__marsha-2414 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
(deposit) support apostrophe in filenames
## Bug Report
**Problematic Behavior**
Loading a file with a typographic apostrophe (or curly quote) in the filename generates an error during the download process
``` xml
<Error>
<Code>InvalidArgument</Code>
<Message>Header value cannot be represented using ISO-8859-1.</Message>
<ArgumentName>response-content-disposition</ArgumentName>
<ArgumentValue>attachment; filename=Capture d’écran du 2023-07-05 17-55-04.png</ArgumentValue>
<RequestId>7576DY31G7YEKEHW</RequestId>
<HostId>20bDgURy8zglt3nW4UJbIbH0G5+c+DOmSeRKq/K2YNwP6rZMMHzS4h0Af91vioEWtbJsRurDzP0=</HostId>
</Error>
```
**Steps to Reproduce**
1. Add a Marsha Deposit XBlock on Studio
2. On the student view, upload a file with a typographic apostrophe in its name
3. Try to download the file
**Environment**
- Marsha version: production
- Platform: Open edX
**Possible Solution**
Use a more extensive character set like UTF-8?
Otherwise, escape apostrophes in filenames, or replace right single quotation marks with straight apostrophes?
**Additional context/Screenshots**
- OK : https://www.compart.com/fr/unicode/U+0027
- KO : https://www.compart.com/fr/unicode/U+2019
[deposit-typographic-apostrophe-bug.webm](https://github.com/openfun/marsha/assets/23249365/ee43dd63-9b1a-43a7-912c-6ed84f07404c)
</issue>
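For illustration (the helper name below is invented for the sketch): the S3 error comes from the `response-content-disposition` value containing U+2019, which cannot be represented in ISO-8859-1, so percent-encoding the filename before it is placed in the query string keeps the header value ASCII-only. A fuller fix could also use the RFC 6266/5987 `filename*=UTF-8''...` form, but the minimal idea is:

```python
from urllib.parse import quote, quote_plus

def response_content_disposition(filename):
    """Sketch: build an ASCII-safe S3 ``response-content-disposition`` query value."""
    # quote() percent-encodes the curly quote using UTF-8 (U+2019 -> %E2%80%99),
    # so the resulting header value fits within ISO-8859-1.
    header = "attachment; filename=" + quote(filename)
    return "response-content-disposition=" + quote_plus(header)

# e.g. response_content_disposition("Capture d’écran du 2023-07-05 17-55-04.png")
```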
<code>
[start of src/backend/marsha/deposit/serializers.py]
1 """Structure of deposit related models API responses with Django Rest Framework serializers."""
2 import mimetypes
3 from os.path import splitext
4 from urllib.parse import quote_plus
5
6 from django.conf import settings
7 from django.urls import reverse
8
9 from rest_framework import serializers
10
11 from marsha.core.serializers import (
12 BaseInitiateUploadSerializer,
13 UploadableFileWithExtensionSerializerMixin,
14 get_resource_cloudfront_url_params,
15 )
16 from marsha.core.serializers.playlist import PlaylistLiteSerializer
17 from marsha.core.utils import cloudfront_utils, time_utils
18
19 from ..core.models import User
20 from .models import DepositedFile, FileDepository
21
22
23 class DepositedFileSerializer(
24 UploadableFileWithExtensionSerializerMixin,
25 serializers.ModelSerializer,
26 BaseInitiateUploadSerializer,
27 ):
28 """A serializer to display a deposited file."""
29
30 class Meta: # noqa
31 model = DepositedFile
32 fields = (
33 "filename",
34 "author_name",
35 "id",
36 "file_depository_id",
37 "read",
38 "url",
39 "uploaded_on",
40 "upload_state",
41 "size",
42 )
43 read_only_fields = (
44 "id",
45 "file_depository_id",
46 "url",
47 "uploaded_on",
48 "upload_state",
49 )
50
51 # file_depository = FileDepositorySerializer(read_only=True)
52 # filename = serializers.SerializerMethodField()
53 url = serializers.SerializerMethodField()
54 # Make sure file depository UUID is converted to a string during serialization
55 file_depository_id = serializers.PrimaryKeyRelatedField(
56 read_only=True, pk_field=serializers.CharField()
57 )
58
59 @property
60 def max_upload_file_size(self):
61 """return the deposited max file size define in the settings.
62
63 The @property decorator is used to ease the use of @override_settings
64 in tests. Otherwise the setting is not changed and we can't easily test
65 an upload with a size higher than the one defined in the settings
66 """
67 return settings.DEPOSITED_FILE_SOURCE_MAX_SIZE
68
69 def create(self, validated_data):
70 """Force the file depository field to the file depository of the JWT Token if any,
71 and set the author name to the username of the JWT Token if any.
72
73 Parameters
74 ----------
75 validated_data : dictionary
76 Dictionary of the deserialized values of each field after validation.
77
78 Returns
79 -------
80 dictionary
81 The "validated_data" dictionary is returned after modification.
82
83 """
84 resource = self.context["request"].resource
85 user = self.context["request"].user
86 file_depository_id = self.context["view"].get_related_filedepository_id()
87
88 if not validated_data.get("file_depository_id"):
89 validated_data["file_depository_id"] = file_depository_id
90
91 if resource:
92 validated_data["author_id"] = resource.user.get("id")
93
94 # try to get the most useful username from the token
95 if resource.user:
96 if author_name := (
97 resource.user.get("user_fullname") or resource.user.get("username")
98 ):
99 validated_data["author_name"] = author_name
100 else:
101 validated_data["author_id"] = user.id
102 validated_data["author_name"] = User.objects.get(id=user.id).username
103
104 return super().create(validated_data)
105
106 def _get_extension_string(self, obj):
107 """Deposited file extension with the leading dot.
108
109 Parameters
110 ----------
111 obj : Type[models.DepositedFile]
112 The deposited file that we want to serialize
113
114 Returns
115 -------
116 String
117 The extension with the leading dot if the deposited file has an extension
118 An empty string otherwise
119
120 """
121 return "." + obj.extension if obj.extension else ""
122
123 # def get_filename(self, obj):
124 # """Filename of the deposited file."""
125 # return self._get_filename(obj.title) if obj.uploaded_on else None
126
127 def get_url(self, obj):
128 """Url of the DepositedFile.
129
130 Parameters
131 ----------
132 obj : Type[models.DepositedFile]
133 The deposited file that we want to serialize
134
135 Returns
136 -------
137 String or None
138 the url to fetch the deposited file on CloudFront
139 None if the deposited file is still not uploaded to S3 with success
140
141 """
142 if obj.uploaded_on is None:
143 return None
144
145 base = (
146 f"{settings.AWS_S3_URL_PROTOCOL}://{settings.CLOUDFRONT_DOMAIN}/"
147 f"{obj.file_depository.pk}/depositedfile/{obj.pk}/"
148 f"{time_utils.to_timestamp(obj.uploaded_on)}"
149 )
150
151 url = (
152 f"{base:s}{self._get_extension_string(obj)}?"
153 f"response-content-disposition={quote_plus('attachment; filename=' + obj.filename)}"
154 )
155
156 if settings.CLOUDFRONT_SIGNED_URLS_ACTIVE:
157 params = get_resource_cloudfront_url_params(
158 "depositedfile", obj.file_depository_id
159 )
160 url = cloudfront_utils.build_signed_url(url, params)
161 return url
162
163
164 class DepositedFileInitiateUploadSerializer(BaseInitiateUploadSerializer):
165 """An initiate-upload serializer dedicated to deposited file."""
166
167 @property
168 def max_upload_file_size(self):
169 """return the deposited max file size define in the settings.
170
171 The @property decorator is used to ease the use of @override_settings
172 in tests. Otherwise the setting is not changed and we can't easily test
173 an upload with a size higher than the one defined in the settings
174 """
175 return settings.DEPOSITED_FILE_SOURCE_MAX_SIZE
176
177 def validate(self, attrs):
178 """Validate if the mimetype is allowed or not."""
179 # mimetype is provided, we directly check it
180 if attrs["mimetype"] != "":
181 attrs["extension"] = mimetypes.guess_extension(attrs["mimetype"])
182
183 # mimetype is not provided, we have to guess it from the extension
184 else:
185 mimetypes.init()
186 extension = splitext(attrs["filename"])[1]
187 mimetype = mimetypes.types_map.get(extension)
188 # extension is added to the data in order to be used later
189 attrs["extension"] = extension
190 attrs["mimetype"] = mimetype
191
192 return attrs
193
194
195 class FileDepositorySerializer(serializers.ModelSerializer):
196 """A serializer to display a FileDepository resource."""
197
198 class Meta: # noqa
199 model = FileDepository
200 fields = (
201 "id",
202 "lti_id",
203 "title",
204 "description",
205 "playlist",
206 )
207 read_only_fields = (
208 "id",
209 "lti_id",
210 "playlist",
211 )
212
213 playlist = PlaylistLiteSerializer(read_only=True)
214
215
216 class FileDepositorySelectLTISerializer(FileDepositorySerializer):
217 """A serializer to display a FileDepository resource for LTI select content request."""
218
219 class Meta: # noqa
220 model = FileDepository
221 fields = (
222 "id",
223 "lti_id",
224 "lti_url",
225 "title",
226 "description",
227 "playlist",
228 )
229
230 lti_url = serializers.SerializerMethodField()
231
232 def get_lti_url(self, obj):
233 """LTI Url of the FileDepository.
234
235 Parameters
236 ----------
237 obj : Type[models.Document]
238 The document that we want to serialize
239
240 Returns
241 -------
242 String
243 the LTI url to be used by LTI consumers
244
245 """
246 return self.context["request"].build_absolute_uri(
247 reverse("deposit:file_depository_lti_view", args=[obj.id]),
248 )
249
[end of src/backend/marsha/deposit/serializers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/backend/marsha/deposit/serializers.py b/src/backend/marsha/deposit/serializers.py
--- a/src/backend/marsha/deposit/serializers.py
+++ b/src/backend/marsha/deposit/serializers.py
@@ -1,7 +1,7 @@
"""Structure of deposit related models API responses with Django Rest Framework serializers."""
import mimetypes
from os.path import splitext
-from urllib.parse import quote_plus
+from urllib.parse import quote, quote_plus
from django.conf import settings
from django.urls import reverse
@@ -148,9 +148,12 @@
f"{time_utils.to_timestamp(obj.uploaded_on)}"
)
+ response_content_disposition = quote_plus(
+ "attachment; filename=" + quote(obj.filename)
+ )
url = (
f"{base:s}{self._get_extension_string(obj)}?"
- f"response-content-disposition={quote_plus('attachment; filename=' + obj.filename)}"
+ f"response-content-disposition={response_content_disposition}"
)
if settings.CLOUDFRONT_SIGNED_URLS_ACTIVE:
| {"golden_diff": "diff --git a/src/backend/marsha/deposit/serializers.py b/src/backend/marsha/deposit/serializers.py\n--- a/src/backend/marsha/deposit/serializers.py\n+++ b/src/backend/marsha/deposit/serializers.py\n@@ -1,7 +1,7 @@\n \"\"\"Structure of deposit related models API responses with Django Rest Framework serializers.\"\"\"\n import mimetypes\n from os.path import splitext\n-from urllib.parse import quote_plus\n+from urllib.parse import quote, quote_plus\n \n from django.conf import settings\n from django.urls import reverse\n@@ -148,9 +148,12 @@\n f\"{time_utils.to_timestamp(obj.uploaded_on)}\"\n )\n \n+ response_content_disposition = quote_plus(\n+ \"attachment; filename=\" + quote(obj.filename)\n+ )\n url = (\n f\"{base:s}{self._get_extension_string(obj)}?\"\n- f\"response-content-disposition={quote_plus('attachment; filename=' + obj.filename)}\"\n+ f\"response-content-disposition={response_content_disposition}\"\n )\n \n if settings.CLOUDFRONT_SIGNED_URLS_ACTIVE:\n", "issue": "(deposit) support apostrophe in filenames\n## Bug Report\r\n\r\n**Problematic Behavior**\r\nLoading a file with a typographic apostrophe (or curly quote) in the filename generates an error during the download process\r\n\r\n``` xml\r\n<Error>\r\n<Code>InvalidArgument</Code>\r\n<Message>Header value cannot be represented using ISO-8859-1.</Message>\r\n<ArgumentName>response-content-disposition</ArgumentName>\r\n<ArgumentValue>attachment; filename=Capture d\u2019\u00e9cran du 2023-07-05 17-55-04.png</ArgumentValue>\r\n<RequestId>7576DY31G7YEKEHW</RequestId>\r\n<HostId>20bDgURy8zglt3nW4UJbIbH0G5+c+DOmSeRKq/K2YNwP6rZMMHzS4h0Af91vioEWtbJsRurDzP0=</HostId>\r\n</Error>\r\n```\r\n\r\n**Steps to Reproduce**\r\n1. Add a Marsha Deposit XBlock on Studio\r\n2. On student view, upload a file with a typographic apostrophe on its name \r\n3. 
Try to download the file\r\n\r\n**Environment**\r\n- Marsha version: production\r\n- Platform: Open edX\r\n\r\n**Possible Solution**\r\nUse a more extensive character set like UTF-8?\r\nOtherwise, escape apostrophes in filenames or replace it by right single quotation marks?\r\n\r\n**Additional context/Screenshots**\r\n\r\n- OK : https://www.compart.com/fr/unicode/U+0027\r\n- KO : https://www.compart.com/fr/unicode/U+2019\r\n\r\n[deposit-typographic-apostrophe-bug.webm](https://github.com/openfun/marsha/assets/23249365/ee43dd63-9b1a-43a7-912c-6ed84f07404c)\n", "before_files": [{"content": "\"\"\"Structure of deposit related models API responses with Django Rest Framework serializers.\"\"\"\nimport mimetypes\nfrom os.path import splitext\nfrom urllib.parse import quote_plus\n\nfrom django.conf import settings\nfrom django.urls import reverse\n\nfrom rest_framework import serializers\n\nfrom marsha.core.serializers import (\n BaseInitiateUploadSerializer,\n UploadableFileWithExtensionSerializerMixin,\n get_resource_cloudfront_url_params,\n)\nfrom marsha.core.serializers.playlist import PlaylistLiteSerializer\nfrom marsha.core.utils import cloudfront_utils, time_utils\n\nfrom ..core.models import User\nfrom .models import DepositedFile, FileDepository\n\n\nclass DepositedFileSerializer(\n UploadableFileWithExtensionSerializerMixin,\n serializers.ModelSerializer,\n BaseInitiateUploadSerializer,\n):\n \"\"\"A serializer to display a deposited file.\"\"\"\n\n class Meta: # noqa\n model = DepositedFile\n fields = (\n \"filename\",\n \"author_name\",\n \"id\",\n \"file_depository_id\",\n \"read\",\n \"url\",\n \"uploaded_on\",\n \"upload_state\",\n \"size\",\n )\n read_only_fields = (\n \"id\",\n \"file_depository_id\",\n \"url\",\n \"uploaded_on\",\n \"upload_state\",\n )\n\n # file_depository = FileDepositorySerializer(read_only=True)\n # filename = serializers.SerializerMethodField()\n url = serializers.SerializerMethodField()\n # Make sure file depository UUID is converted to a string during serialization\n file_depository_id = serializers.PrimaryKeyRelatedField(\n read_only=True, pk_field=serializers.CharField()\n )\n\n @property\n def max_upload_file_size(self):\n \"\"\"return the deposited max file size define in the settings.\n\n The @property decorator is used to ease the use of @override_settings\n in tests. 
Otherwise the setting is not changed and we can't easily test\n an upload with a size higher than the one defined in the settings\n \"\"\"\n return settings.DEPOSITED_FILE_SOURCE_MAX_SIZE\n\n def create(self, validated_data):\n \"\"\"Force the file depository field to the file depository of the JWT Token if any,\n and set the author name to the username of the JWT Token if any.\n\n Parameters\n ----------\n validated_data : dictionary\n Dictionary of the deserialized values of each field after validation.\n\n Returns\n -------\n dictionary\n The \"validated_data\" dictionary is returned after modification.\n\n \"\"\"\n resource = self.context[\"request\"].resource\n user = self.context[\"request\"].user\n file_depository_id = self.context[\"view\"].get_related_filedepository_id()\n\n if not validated_data.get(\"file_depository_id\"):\n validated_data[\"file_depository_id\"] = file_depository_id\n\n if resource:\n validated_data[\"author_id\"] = resource.user.get(\"id\")\n\n # try to get the most useful username from the token\n if resource.user:\n if author_name := (\n resource.user.get(\"user_fullname\") or resource.user.get(\"username\")\n ):\n validated_data[\"author_name\"] = author_name\n else:\n validated_data[\"author_id\"] = user.id\n validated_data[\"author_name\"] = User.objects.get(id=user.id).username\n\n return super().create(validated_data)\n\n def _get_extension_string(self, obj):\n \"\"\"Deposited file extension with the leading dot.\n\n Parameters\n ----------\n obj : Type[models.DepositedFile]\n The deposited file that we want to serialize\n\n Returns\n -------\n String\n The extension with the leading dot if the deposited file has an extension\n An empty string otherwise\n\n \"\"\"\n return \".\" + obj.extension if obj.extension else \"\"\n\n # def get_filename(self, obj):\n # \"\"\"Filename of the deposited file.\"\"\"\n # return self._get_filename(obj.title) if obj.uploaded_on else None\n\n def get_url(self, obj):\n \"\"\"Url of the DepositedFile.\n\n Parameters\n ----------\n obj : Type[models.DepositedFile]\n The deposited file that we want to serialize\n\n Returns\n -------\n String or None\n the url to fetch the deposited file on CloudFront\n None if the deposited file is still not uploaded to S3 with success\n\n \"\"\"\n if obj.uploaded_on is None:\n return None\n\n base = (\n f\"{settings.AWS_S3_URL_PROTOCOL}://{settings.CLOUDFRONT_DOMAIN}/\"\n f\"{obj.file_depository.pk}/depositedfile/{obj.pk}/\"\n f\"{time_utils.to_timestamp(obj.uploaded_on)}\"\n )\n\n url = (\n f\"{base:s}{self._get_extension_string(obj)}?\"\n f\"response-content-disposition={quote_plus('attachment; filename=' + obj.filename)}\"\n )\n\n if settings.CLOUDFRONT_SIGNED_URLS_ACTIVE:\n params = get_resource_cloudfront_url_params(\n \"depositedfile\", obj.file_depository_id\n )\n url = cloudfront_utils.build_signed_url(url, params)\n return url\n\n\nclass DepositedFileInitiateUploadSerializer(BaseInitiateUploadSerializer):\n \"\"\"An initiate-upload serializer dedicated to deposited file.\"\"\"\n\n @property\n def max_upload_file_size(self):\n \"\"\"return the deposited max file size define in the settings.\n\n The @property decorator is used to ease the use of @override_settings\n in tests. 
Otherwise the setting is not changed and we can't easily test\n an upload with a size higher than the one defined in the settings\n \"\"\"\n return settings.DEPOSITED_FILE_SOURCE_MAX_SIZE\n\n def validate(self, attrs):\n \"\"\"Validate if the mimetype is allowed or not.\"\"\"\n # mimetype is provided, we directly check it\n if attrs[\"mimetype\"] != \"\":\n attrs[\"extension\"] = mimetypes.guess_extension(attrs[\"mimetype\"])\n\n # mimetype is not provided, we have to guess it from the extension\n else:\n mimetypes.init()\n extension = splitext(attrs[\"filename\"])[1]\n mimetype = mimetypes.types_map.get(extension)\n # extension is added to the data in order to be used later\n attrs[\"extension\"] = extension\n attrs[\"mimetype\"] = mimetype\n\n return attrs\n\n\nclass FileDepositorySerializer(serializers.ModelSerializer):\n \"\"\"A serializer to display a FileDepository resource.\"\"\"\n\n class Meta: # noqa\n model = FileDepository\n fields = (\n \"id\",\n \"lti_id\",\n \"title\",\n \"description\",\n \"playlist\",\n )\n read_only_fields = (\n \"id\",\n \"lti_id\",\n \"playlist\",\n )\n\n playlist = PlaylistLiteSerializer(read_only=True)\n\n\nclass FileDepositorySelectLTISerializer(FileDepositorySerializer):\n \"\"\"A serializer to display a FileDepository resource for LTI select content request.\"\"\"\n\n class Meta: # noqa\n model = FileDepository\n fields = (\n \"id\",\n \"lti_id\",\n \"lti_url\",\n \"title\",\n \"description\",\n \"playlist\",\n )\n\n lti_url = serializers.SerializerMethodField()\n\n def get_lti_url(self, obj):\n \"\"\"LTI Url of the FileDepository.\n\n Parameters\n ----------\n obj : Type[models.Document]\n The document that we want to serialize\n\n Returns\n -------\n String\n the LTI url to be used by LTI consumers\n\n \"\"\"\n return self.context[\"request\"].build_absolute_uri(\n reverse(\"deposit:file_depository_lti_view\", args=[obj.id]),\n )\n", "path": "src/backend/marsha/deposit/serializers.py"}]} | 3,254 | 243 |
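For the deposit-serializer diff shown above: the failure happens because S3 URL-decodes the `response-content-disposition` query parameter and must then express the result as an ISO-8859-1 header value. Percent-encoding the filename itself before building the disposition keeps the decoded header ASCII-only. A minimal sketch with a made-up filename:

```python
from urllib.parse import quote, quote_plus, unquote_plus

filename = "Capture d\u2019ecran.png"  # hypothetical name with a typographic apostrophe

# Encoding the disposition once: after S3 decodes the query parameter the
# header value still contains non-Latin-1 characters and is rejected.
once = quote_plus("attachment; filename=" + filename)
print(unquote_plus(once))

# Encoding the filename first (as the patch does) keeps the decoded value ASCII.
twice = quote_plus("attachment; filename=" + quote(filename))
print(unquote_plus(twice))
```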
gh_patches_debug_21508 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-2804 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
batch-compute resource security-group filter support
Add a security-group filter to the batch-compute resource using the securityGroupIds attribute
</issue>
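One plausible shape for such a filter, following the registry pattern the module below already uses for its actions: subclass Custodian's `SecurityGroupFilter` and point `RelatedIdsExpression` at the attribute that carries the group ids. The class name is illustrative, and the sketch assumes it lives in `c7n/resources/batch.py` next to the `ComputeEnvironment` resource.

```python
from c7n.filters.vpc import SecurityGroupFilter

# Assumed to sit in c7n/resources/batch.py alongside the ComputeEnvironment
# resource registered below as 'batch-compute'.
@ComputeEnvironment.filter_registry.register('security-group')
class ComputeSGFilter(SecurityGroupFilter):

    # JMESPath into the describe_compute_environments response pointing at
    # the security group ids attached to each compute environment.
    RelatedIdsExpression = "computeResources.securityGroupIds"
```

A policy could then use `- type: security-group` with the usual key/value matchers against the related security groups.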
<code>
[start of c7n/resources/batch.py]
1 # Copyright 2017-2018 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from __future__ import absolute_import, division, print_function, unicode_literals
15
16 from c7n.manager import resources
17 from c7n.query import QueryResourceManager
18 from c7n.actions import BaseAction
19 from c7n.utils import local_session, type_schema
20
21
22 @resources.register('batch-compute')
23 class ComputeEnvironment(QueryResourceManager):
24
25 class resource_type(object):
26 service = 'batch'
27 filter_name = 'computeEnvironments'
28 filter_type = 'list'
29 dimension = None
30 id = name = "computeEnvironmentName"
31 enum_spec = (
32 'describe_compute_environments', 'computeEnvironments', None)
33
34
35 @resources.register('batch-definition')
36 class JobDefinition(QueryResourceManager):
37
38 class resource_type(object):
39 service = 'batch'
40 filter_name = 'jobDefinitions'
41 filter_type = 'list'
42 dimension = None
43 id = name = "jobDefinitionName"
44 enum_spec = (
45 'describe_job_definitions', 'jobDefinitions', None)
46
47
48 class StateTransitionFilter(object):
49 """Filter resources by state.
50
51 Try to simplify construction for policy authors by automatically
52 filtering elements (filters or actions) to the resource states
53 they are valid for.
54 """
55 valid_origin_states = ()
56
57 def filter_resource_state(self, resources, key, states=None):
58 states = states or self.valid_origin_states
59 if not states:
60 return resources
61 orig_length = len(resources)
62 results = [r for r in resources if r[key] in states]
63 if orig_length != len(results):
64 self.log.warn(
65 "%s implicitly filtered %d of %d resources with valid %s" % (
66 self.__class__.__name__,
67 len(results), orig_length, key.lower()))
68 return results
69
70
71 @ComputeEnvironment.action_registry.register('update-environment')
72 class UpdateComputeEnvironment(BaseAction, StateTransitionFilter):
73 """Updates an AWS batch compute environment
74
75 :example:
76
77 .. code-block: yaml
78
79 policies:
80 - name: update-environments
81 resource: batch-compute
82 filters:
83 - computeResources.desiredvCpus: 0
84 - state: ENABLED
85 actions:
86 - type: update-environment
87 state: DISABLED
88 """
89 schema = {
90 'type': 'object',
91 'additionalProperties': False,
92 'properties': {
93 'type': {'enum': ['update-environment']},
94 'computeEnvironment': {'type': 'string'},
95 'state': {'type': 'string', 'enum': ['ENABLED', 'DISABLED']},
96 'computeResources': {
97 'type': 'object',
98 'additionalProperties': False,
99 'properties': {
100 'minvCpus': {'type': 'integer'},
101 'maxvCpus': {'type': 'integer'},
102 'desiredvCpus': {'type': 'integer'}
103 }
104 },
105 'serviceRole': {'type': 'string'}
106 }
107 }
108 permissions = ('batch:UpdateComputeEnvironment',)
109 valid_origin_status = ('VALID', 'INVALID')
110
111 def process(self, resources):
112 resources = self.filter_resource_state(
113 resources, 'status', self.valid_origin_status)
114 client = local_session(self.manager.session_factory).client('batch')
115 params = dict(self.data)
116 params.pop('type')
117 for r in resources:
118 params['computeEnvironment'] = r['computeEnvironmentName']
119 client.update_compute_environment(**params)
120
121
122 @ComputeEnvironment.action_registry.register('delete')
123 class DeleteComputeEnvironment(BaseAction, StateTransitionFilter):
124 """Delete an AWS batch compute environment
125
126 :example:
127
128 .. code-block: yaml
129
130 policies:
131 - name: delete-environments
132 resource: batch-compute
133 filters:
134 - computeResources.desiredvCpus: 0
135 action:
136 - type: delete
137 """
138 schema = type_schema('delete')
139 permissions = ('batch:DeleteComputeEnvironment',)
140 valid_origin_states = ('DISABLED',)
141 valid_origin_status = ('VALID', 'INVALID')
142
143 def delete_environment(self, r):
144 client = local_session(self.manager.session_factory).client('batch')
145 client.delete_compute_environment(
146 computeEnvironment=r['computeEnvironmentName'])
147
148 def process(self, resources):
149 resources = self.filter_resource_state(
150 self.filter_resource_state(
151 resources, 'state', self.valid_origin_states),
152 'status', self.valid_origin_status)
153 with self.executor_factory(max_workers=2) as w:
154 list(w.map(self.delete_environment, resources))
155
156
157 @JobDefinition.action_registry.register('deregister')
158 class DefinitionDeregister(BaseAction, StateTransitionFilter):
159 """Deregisters a batch definition
160
161 :example:
162
163 .. code-block: yaml
164
165 policies:
166 - name: deregister-definition
167 resource: batch-definition
168 filters:
169 - containerProperties.image: amazonlinux
170 actions:
171 - type: deregister
172 """
173 schema = type_schema('deregister')
174 permissions = ('batch:DeregisterJobDefinition',)
175 valid_origin_states = ('ACTIVE',)
176
177 def deregister_definition(self, r):
178 self.client.deregister_job_definition(
179 jobDefinition='%s:%s' % (r['jobDefinitionName'],
180 r['revision']))
181
182 def process(self, resources):
183 resources = self.filter_resource_state(
184 resources, 'status', self.valid_origin_states)
185 self.client = local_session(
186 self.manager.session_factory).client('batch')
187 with self.executor_factory(max_workers=2) as w:
188 list(w.map(self.deregister_definition, resources))
189
[end of c7n/resources/batch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/c7n/resources/batch.py b/c7n/resources/batch.py
--- a/c7n/resources/batch.py
+++ b/c7n/resources/batch.py
@@ -13,9 +13,11 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
+from c7n.actions import BaseAction
+from c7n.filters.vpc import SecurityGroupFilter, SubnetFilter
from c7n.manager import resources
from c7n.query import QueryResourceManager
-from c7n.actions import BaseAction
+
from c7n.utils import local_session, type_schema
@@ -32,6 +34,18 @@
'describe_compute_environments', 'computeEnvironments', None)
+@ComputeEnvironment.filter_registry.register('security-group')
+class ComputeSGFilter(SecurityGroupFilter):
+
+ RelatedIdsExpression = "computeResources.securityGroupIds"
+
+
+@ComputeEnvironment.filter_registry.register('subnet')
+class ComputeSubnetFilter(SubnetFilter):
+
+ RelatedIdsExpression = "computeResources.subnets"
+
+
@resources.register('batch-definition')
class JobDefinition(QueryResourceManager):
| {"golden_diff": "diff --git a/c7n/resources/batch.py b/c7n/resources/batch.py\n--- a/c7n/resources/batch.py\n+++ b/c7n/resources/batch.py\n@@ -13,9 +13,11 @@\n # limitations under the License.\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n+from c7n.actions import BaseAction\n+from c7n.filters.vpc import SecurityGroupFilter, SubnetFilter\n from c7n.manager import resources\n from c7n.query import QueryResourceManager\n-from c7n.actions import BaseAction\n+\n from c7n.utils import local_session, type_schema\n \n \n@@ -32,6 +34,18 @@\n 'describe_compute_environments', 'computeEnvironments', None)\n \n \[email protected]_registry.register('security-group')\n+class ComputeSGFilter(SecurityGroupFilter):\n+\n+ RelatedIdsExpression = \"computeResources.securityGroupIds\"\n+\n+\[email protected]_registry.register('subnet')\n+class ComputeSubnetFilter(SubnetFilter):\n+\n+ RelatedIdsExpression = \"computeResources.subnets\"\n+\n+\n @resources.register('batch-definition')\n class JobDefinition(QueryResourceManager):\n", "issue": "batch-compute resource security-group filter support\nAdd a security-group filter to the batch-compute resource using the securityGroupIds attribute\r\n \n", "before_files": [{"content": "# Copyright 2017-2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager\nfrom c7n.actions import BaseAction\nfrom c7n.utils import local_session, type_schema\n\n\[email protected]('batch-compute')\nclass ComputeEnvironment(QueryResourceManager):\n\n class resource_type(object):\n service = 'batch'\n filter_name = 'computeEnvironments'\n filter_type = 'list'\n dimension = None\n id = name = \"computeEnvironmentName\"\n enum_spec = (\n 'describe_compute_environments', 'computeEnvironments', None)\n\n\[email protected]('batch-definition')\nclass JobDefinition(QueryResourceManager):\n\n class resource_type(object):\n service = 'batch'\n filter_name = 'jobDefinitions'\n filter_type = 'list'\n dimension = None\n id = name = \"jobDefinitionName\"\n enum_spec = (\n 'describe_job_definitions', 'jobDefinitions', None)\n\n\nclass StateTransitionFilter(object):\n \"\"\"Filter resources by state.\n\n Try to simplify construction for policy authors by automatically\n filtering elements (filters or actions) to the resource states\n they are valid for.\n \"\"\"\n valid_origin_states = ()\n\n def filter_resource_state(self, resources, key, states=None):\n states = states or self.valid_origin_states\n if not states:\n return resources\n orig_length = len(resources)\n results = [r for r in resources if r[key] in states]\n if orig_length != len(results):\n self.log.warn(\n \"%s implicitly filtered %d of %d resources with valid %s\" % (\n self.__class__.__name__,\n len(results), orig_length, key.lower()))\n return results\n\n\[email 
protected]_registry.register('update-environment')\nclass UpdateComputeEnvironment(BaseAction, StateTransitionFilter):\n \"\"\"Updates an AWS batch compute environment\n\n :example:\n\n .. code-block: yaml\n\n policies:\n - name: update-environments\n resource: batch-compute\n filters:\n - computeResources.desiredvCpus: 0\n - state: ENABLED\n actions:\n - type: update-environment\n state: DISABLED\n \"\"\"\n schema = {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n 'type': {'enum': ['update-environment']},\n 'computeEnvironment': {'type': 'string'},\n 'state': {'type': 'string', 'enum': ['ENABLED', 'DISABLED']},\n 'computeResources': {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n 'minvCpus': {'type': 'integer'},\n 'maxvCpus': {'type': 'integer'},\n 'desiredvCpus': {'type': 'integer'}\n }\n },\n 'serviceRole': {'type': 'string'}\n }\n }\n permissions = ('batch:UpdateComputeEnvironment',)\n valid_origin_status = ('VALID', 'INVALID')\n\n def process(self, resources):\n resources = self.filter_resource_state(\n resources, 'status', self.valid_origin_status)\n client = local_session(self.manager.session_factory).client('batch')\n params = dict(self.data)\n params.pop('type')\n for r in resources:\n params['computeEnvironment'] = r['computeEnvironmentName']\n client.update_compute_environment(**params)\n\n\[email protected]_registry.register('delete')\nclass DeleteComputeEnvironment(BaseAction, StateTransitionFilter):\n \"\"\"Delete an AWS batch compute environment\n\n :example:\n\n .. code-block: yaml\n\n policies:\n - name: delete-environments\n resource: batch-compute\n filters:\n - computeResources.desiredvCpus: 0\n action:\n - type: delete\n \"\"\"\n schema = type_schema('delete')\n permissions = ('batch:DeleteComputeEnvironment',)\n valid_origin_states = ('DISABLED',)\n valid_origin_status = ('VALID', 'INVALID')\n\n def delete_environment(self, r):\n client = local_session(self.manager.session_factory).client('batch')\n client.delete_compute_environment(\n computeEnvironment=r['computeEnvironmentName'])\n\n def process(self, resources):\n resources = self.filter_resource_state(\n self.filter_resource_state(\n resources, 'state', self.valid_origin_states),\n 'status', self.valid_origin_status)\n with self.executor_factory(max_workers=2) as w:\n list(w.map(self.delete_environment, resources))\n\n\[email protected]_registry.register('deregister')\nclass DefinitionDeregister(BaseAction, StateTransitionFilter):\n \"\"\"Deregisters a batch definition\n\n :example:\n\n .. code-block: yaml\n\n policies:\n - name: deregister-definition\n resource: batch-definition\n filters:\n - containerProperties.image: amazonlinux\n actions:\n - type: deregister\n \"\"\"\n schema = type_schema('deregister')\n permissions = ('batch:DeregisterJobDefinition',)\n valid_origin_states = ('ACTIVE',)\n\n def deregister_definition(self, r):\n self.client.deregister_job_definition(\n jobDefinition='%s:%s' % (r['jobDefinitionName'],\n r['revision']))\n\n def process(self, resources):\n resources = self.filter_resource_state(\n resources, 'status', self.valid_origin_states)\n self.client = local_session(\n self.manager.session_factory).client('batch')\n with self.executor_factory(max_workers=2) as w:\n list(w.map(self.deregister_definition, resources))\n", "path": "c7n/resources/batch.py"}]} | 2,353 | 255 |
gh_patches_debug_17384 | rasdani/github-patches | git_diff | azavea__raster-vision-988 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Verbose flag is not passed to remote jobs
The `-v` flag works with the inprocess runner, but doesn't seem to get forwarded to remote jobs when using the batch runner.
</issue>
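A sketch of how the verbosity setting could be folded into the command the Batch runner submits for each remote job. The helper below is illustrative: the names mirror the runner code further down, and the `-v`/`-vv` repeat-flag convention for the `rastervision.pipeline.cli` entry point is an assumption.

```python
from typing import List

def build_remote_cmd(cfg_json_uri: str, command: str, verbosity: int,
                     num_splits: int = 1) -> List[str]:
    """Build the CLI invocation submitted to Batch, forwarding -v flags."""
    cmd = ['python', '-m', 'rastervision.pipeline.cli']
    if verbosity > 1:
        # verbosity 2 -> -v, verbosity 3 -> -vv, and so on.
        cmd.append('-' + 'v' * (verbosity - 1))
    cmd.extend(['run_command', cfg_json_uri, command, '--runner', 'batch'])
    if num_splits > 1:
        cmd.extend(['--num-splits', str(num_splits)])
    return cmd
```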
<code>
[start of rastervision_aws_batch/rastervision/aws_batch/aws_batch_runner.py]
1 import copy
2 import logging
3 import os
4 import uuid
5 from inspect import signature
6 from typing import List, Optional
7
8 from rastervision.pipeline import rv_config
9 from rastervision.pipeline.runner import Runner
10
11 log = logging.getLogger(__name__)
12 AWS_BATCH = 'batch'
13
14
15 def submit_job(cmd: List[str],
16 job_name: str,
17 debug: bool = False,
18 profile: str = False,
19 attempts: int = 5,
20 parent_job_ids: List[str] = None,
21 num_array_jobs: Optional[int] = None,
22 use_gpu: bool = False,
23 job_queue: Optional[str] = None,
24 job_def: Optional[str] = None) -> str:
25 """Submit a job to run on AWS Batch.
26
27 Args:
28 cmd: a command to run in the Docker container for the remote job
29 debug: if True, run the command using a ptvsd wrapper which sets up a remote
30 VS Code Python debugger server
31 profile: if True, run the command using kernprof, a line profiler
32 attempts: the number of times to try running the command which is useful
33 in case of failure.
34 parent_job_ids: optional list of parent Batch job ids. The job created by this
35 will only run after the parent jobs complete successfully.
36 num_array_jobs: if set, make this a Batch array job with size equal to
37 num_array_jobs
38 use_gpu: if True, run the job in a GPU-enabled queue
39 job_queue: if set, use this job queue
40 job_def: if set, use this job definition
41 """
42 batch_config = rv_config.get_namespace_config(AWS_BATCH)
43
44 if job_queue is None:
45 if use_gpu:
46 job_queue = batch_config('gpu_job_queue')
47 else:
48 job_queue = batch_config('cpu_job_queue')
49
50 if job_def is None:
51 if use_gpu:
52 job_def = batch_config('gpu_job_def')
53 else:
54 job_def = batch_config('cpu_job_def')
55
56 import boto3
57 client = boto3.client('batch')
58
59 cmd_list = cmd.split(' ')
60 if debug:
61 cmd_list = [
62 'python', '-m', 'ptvsd', '--host', '0.0.0.0', '--port', '6006',
63 '--wait', '-m'
64 ] + cmd_list
65
66 if profile:
67 cmd_list = ['kernprof', '-v', '-l'] + cmd_list
68
69 kwargs = {
70 'jobName': job_name,
71 'jobQueue': job_queue,
72 'jobDefinition': job_def,
73 'containerOverrides': {
74 'command': cmd_list
75 },
76 'retryStrategy': {
77 'attempts': attempts
78 },
79 }
80 if parent_job_ids:
81 kwargs['dependsOn'] = [{'jobId': id} for id in parent_job_ids]
82 if num_array_jobs:
83 kwargs['arrayProperties'] = {'size': num_array_jobs}
84
85 job_id = client.submit_job(**kwargs)['jobId']
86 msg = 'submitted job with jobName={} and jobId={} w/ parent(s)={}'.format(
87 job_name, job_id, parent_job_ids)
88 log.info(msg)
89 log.info(cmd_list)
90
91 return job_id
92
93
94 class AWSBatchRunner(Runner):
95 """Runs pipelines remotely using AWS Batch.
96
97 Requires Everett configuration of form:
98
99 ```
100 [AWS_BATCH]
101 cpu_job_queue=
102 cpu_job_def=
103 gpu_job_queue=
104 gpu_job_def=
105 attempts=
106 ```
107 """
108
109 def run(self,
110 cfg_json_uri,
111 pipeline,
112 commands,
113 num_splits=1,
114 pipeline_run_name: str = 'raster-vision'):
115 parent_job_ids = []
116
117 # pipeline-specific job queue
118 if hasattr(pipeline, 'job_queue'):
119 pipeline_job_queue = pipeline.job_queue
120 else:
121 pipeline_job_queue = None
122
123 # pipeline-specific job definition
124 if hasattr(pipeline, 'job_def'):
125 pipeline_job_def = pipeline.job_def
126 else:
127 pipeline_job_def = None
128
129 for command in commands:
130
131 # detect external command
132 if hasattr(pipeline, command):
133 fn = getattr(pipeline, command)
134 params = signature(fn).parameters
135 external = hasattr(fn, 'external') and len(params) in {0, 1}
136 array_job_capable = hasattr(fn, 'array_job_capable') \
137 and fn.array_job_capable
138 else:
139 external = False
140 array_job_capable = False
141
142 # command-specific job queue, job definition
143 job_def = pipeline_job_def
144 job_queue = pipeline_job_queue
145 if hasattr(pipeline, command):
146 fn = getattr(pipeline, command)
147 if hasattr(fn, 'job_def'):
148 job_def = fn.job_def
149 if hasattr(fn, 'job_queue'):
150 job_queue = fn.job_queue
151
152 num_array_jobs = None
153 use_gpu = command in pipeline.gpu_commands
154
155 job_name = f'{pipeline_run_name}-{command}-{uuid.uuid4()}'
156
157 if not external:
158 cmd = [
159 'python', '-m', 'rastervision.pipeline.cli run_command',
160 cfg_json_uri, command, '--runner', AWS_BATCH
161 ]
162 if command in pipeline.split_commands and num_splits > 1:
163 num_array_jobs = num_splits
164 cmd += ['--num-splits', str(num_splits)]
165 job_id = submit_job(
166 cmd=' '.join(cmd),
167 job_name=job_name,
168 parent_job_ids=parent_job_ids,
169 num_array_jobs=num_array_jobs,
170 use_gpu=use_gpu,
171 job_queue=job_queue,
172 job_def=job_def)
173 parent_job_ids = [job_id]
174 else:
175 if command in pipeline.split_commands and num_splits > 1:
176 if len(params) == 1 and array_job_capable:
177 cmd = fn(-num_splits)
178 num_array_jobs = num_splits
179 job_id = submit_job(
180 cmd=' '.join(cmd),
181 job_name=job_name,
182 parent_job_ids=parent_job_ids,
183 num_array_jobs=num_array_jobs,
184 use_gpu=use_gpu,
185 job_queue=job_queue,
186 job_def=job_def)
187 parent_job_ids = [job_id]
188 elif len(params) == 1 and not array_job_capable:
189 num_array_jobs = None
190 new_parent_job_ids = []
191 for cmd in fn(num_splits):
192 job_id = submit_job(
193 cmd=' '.join(cmd),
194 job_name=job_name,
195 parent_job_ids=parent_job_ids,
196 num_array_jobs=num_array_jobs,
197 use_gpu=use_gpu,
198 job_queue=job_queue,
199 job_def=job_def)
200 new_parent_job_ids.append(job_id)
201 parent_job_ids = copy.copy(new_parent_job_ids)
202 elif len(params) == 0:
203 cmd = fn()
204 num_array_jobs = None
205 job_id = submit_job(
206 cmd=' '.join(cmd),
207 job_name=job_name,
208 parent_job_ids=parent_job_ids,
209 num_array_jobs=num_array_jobs,
210 use_gpu=use_gpu,
211 job_queue=job_queue,
212 job_def=job_def)
213 parent_job_ids = [job_id]
214 else:
215 if len(params) == 0:
216 cmd = fn()
217 elif len(params) == 1:
218 cmd = fn(1)[0]
219 num_array_jobs = 1
220 job_id = submit_job(
221 cmd=' '.join(cmd),
222 job_name=job_name,
223 parent_job_ids=parent_job_ids,
224 num_array_jobs=num_array_jobs,
225 use_gpu=use_gpu,
226 job_queue=job_queue,
227 job_def=job_def)
228 parent_job_ids = [job_id]
229
230 job_queue = None
231 job_def = None
232
233 def get_split_ind(self):
234 return int(os.environ.get('AWS_BATCH_JOB_ARRAY_INDEX', 0))
235
[end of rastervision_aws_batch/rastervision/aws_batch/aws_batch_runner.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rastervision_aws_batch/rastervision/aws_batch/aws_batch_runner.py b/rastervision_aws_batch/rastervision/aws_batch/aws_batch_runner.py
--- a/rastervision_aws_batch/rastervision/aws_batch/aws_batch_runner.py
+++ b/rastervision_aws_batch/rastervision/aws_batch/aws_batch_runner.py
@@ -155,10 +155,13 @@
job_name = f'{pipeline_run_name}-{command}-{uuid.uuid4()}'
if not external:
- cmd = [
- 'python', '-m', 'rastervision.pipeline.cli run_command',
- cfg_json_uri, command, '--runner', AWS_BATCH
- ]
+ cmd = ['python', '-m', 'rastervision.pipeline.cli']
+ if rv_config.get_verbosity() > 1:
+ cmd.append('-' + 'v' * (rv_config.get_verbosity() - 1))
+ cmd.extend([
+ 'run_command', cfg_json_uri, command, '--runner', AWS_BATCH
+ ])
+
if command in pipeline.split_commands and num_splits > 1:
num_array_jobs = num_splits
cmd += ['--num-splits', str(num_splits)]
| {"golden_diff": "diff --git a/rastervision_aws_batch/rastervision/aws_batch/aws_batch_runner.py b/rastervision_aws_batch/rastervision/aws_batch/aws_batch_runner.py\n--- a/rastervision_aws_batch/rastervision/aws_batch/aws_batch_runner.py\n+++ b/rastervision_aws_batch/rastervision/aws_batch/aws_batch_runner.py\n@@ -155,10 +155,13 @@\n job_name = f'{pipeline_run_name}-{command}-{uuid.uuid4()}'\n \n if not external:\n- cmd = [\n- 'python', '-m', 'rastervision.pipeline.cli run_command',\n- cfg_json_uri, command, '--runner', AWS_BATCH\n- ]\n+ cmd = ['python', '-m', 'rastervision.pipeline.cli']\n+ if rv_config.get_verbosity() > 1:\n+ cmd.append('-' + 'v' * (rv_config.get_verbosity() - 1))\n+ cmd.extend([\n+ 'run_command', cfg_json_uri, command, '--runner', AWS_BATCH\n+ ])\n+\n if command in pipeline.split_commands and num_splits > 1:\n num_array_jobs = num_splits\n cmd += ['--num-splits', str(num_splits)]\n", "issue": "Verbose flag is not passed to remote jobs\nThe `-v` flag works with the inprocess runner, but doesn't seem to get forwarded to remote jobs when using the batch runner.\n", "before_files": [{"content": "import copy\nimport logging\nimport os\nimport uuid\nfrom inspect import signature\nfrom typing import List, Optional\n\nfrom rastervision.pipeline import rv_config\nfrom rastervision.pipeline.runner import Runner\n\nlog = logging.getLogger(__name__)\nAWS_BATCH = 'batch'\n\n\ndef submit_job(cmd: List[str],\n job_name: str,\n debug: bool = False,\n profile: str = False,\n attempts: int = 5,\n parent_job_ids: List[str] = None,\n num_array_jobs: Optional[int] = None,\n use_gpu: bool = False,\n job_queue: Optional[str] = None,\n job_def: Optional[str] = None) -> str:\n \"\"\"Submit a job to run on AWS Batch.\n\n Args:\n cmd: a command to run in the Docker container for the remote job\n debug: if True, run the command using a ptvsd wrapper which sets up a remote\n VS Code Python debugger server\n profile: if True, run the command using kernprof, a line profiler\n attempts: the number of times to try running the command which is useful\n in case of failure.\n parent_job_ids: optional list of parent Batch job ids. 
The job created by this\n will only run after the parent jobs complete successfully.\n num_array_jobs: if set, make this a Batch array job with size equal to\n num_array_jobs\n use_gpu: if True, run the job in a GPU-enabled queue\n job_queue: if set, use this job queue\n job_def: if set, use this job definition\n \"\"\"\n batch_config = rv_config.get_namespace_config(AWS_BATCH)\n\n if job_queue is None:\n if use_gpu:\n job_queue = batch_config('gpu_job_queue')\n else:\n job_queue = batch_config('cpu_job_queue')\n\n if job_def is None:\n if use_gpu:\n job_def = batch_config('gpu_job_def')\n else:\n job_def = batch_config('cpu_job_def')\n\n import boto3\n client = boto3.client('batch')\n\n cmd_list = cmd.split(' ')\n if debug:\n cmd_list = [\n 'python', '-m', 'ptvsd', '--host', '0.0.0.0', '--port', '6006',\n '--wait', '-m'\n ] + cmd_list\n\n if profile:\n cmd_list = ['kernprof', '-v', '-l'] + cmd_list\n\n kwargs = {\n 'jobName': job_name,\n 'jobQueue': job_queue,\n 'jobDefinition': job_def,\n 'containerOverrides': {\n 'command': cmd_list\n },\n 'retryStrategy': {\n 'attempts': attempts\n },\n }\n if parent_job_ids:\n kwargs['dependsOn'] = [{'jobId': id} for id in parent_job_ids]\n if num_array_jobs:\n kwargs['arrayProperties'] = {'size': num_array_jobs}\n\n job_id = client.submit_job(**kwargs)['jobId']\n msg = 'submitted job with jobName={} and jobId={} w/ parent(s)={}'.format(\n job_name, job_id, parent_job_ids)\n log.info(msg)\n log.info(cmd_list)\n\n return job_id\n\n\nclass AWSBatchRunner(Runner):\n \"\"\"Runs pipelines remotely using AWS Batch.\n\n Requires Everett configuration of form:\n\n ```\n [AWS_BATCH]\n cpu_job_queue=\n cpu_job_def=\n gpu_job_queue=\n gpu_job_def=\n attempts=\n ```\n \"\"\"\n\n def run(self,\n cfg_json_uri,\n pipeline,\n commands,\n num_splits=1,\n pipeline_run_name: str = 'raster-vision'):\n parent_job_ids = []\n\n # pipeline-specific job queue\n if hasattr(pipeline, 'job_queue'):\n pipeline_job_queue = pipeline.job_queue\n else:\n pipeline_job_queue = None\n\n # pipeline-specific job definition\n if hasattr(pipeline, 'job_def'):\n pipeline_job_def = pipeline.job_def\n else:\n pipeline_job_def = None\n\n for command in commands:\n\n # detect external command\n if hasattr(pipeline, command):\n fn = getattr(pipeline, command)\n params = signature(fn).parameters\n external = hasattr(fn, 'external') and len(params) in {0, 1}\n array_job_capable = hasattr(fn, 'array_job_capable') \\\n and fn.array_job_capable\n else:\n external = False\n array_job_capable = False\n\n # command-specific job queue, job definition\n job_def = pipeline_job_def\n job_queue = pipeline_job_queue\n if hasattr(pipeline, command):\n fn = getattr(pipeline, command)\n if hasattr(fn, 'job_def'):\n job_def = fn.job_def\n if hasattr(fn, 'job_queue'):\n job_queue = fn.job_queue\n\n num_array_jobs = None\n use_gpu = command in pipeline.gpu_commands\n\n job_name = f'{pipeline_run_name}-{command}-{uuid.uuid4()}'\n\n if not external:\n cmd = [\n 'python', '-m', 'rastervision.pipeline.cli run_command',\n cfg_json_uri, command, '--runner', AWS_BATCH\n ]\n if command in pipeline.split_commands and num_splits > 1:\n num_array_jobs = num_splits\n cmd += ['--num-splits', str(num_splits)]\n job_id = submit_job(\n cmd=' '.join(cmd),\n job_name=job_name,\n parent_job_ids=parent_job_ids,\n num_array_jobs=num_array_jobs,\n use_gpu=use_gpu,\n job_queue=job_queue,\n job_def=job_def)\n parent_job_ids = [job_id]\n else:\n if command in pipeline.split_commands and num_splits > 1:\n if len(params) == 1 and 
array_job_capable:\n cmd = fn(-num_splits)\n num_array_jobs = num_splits\n job_id = submit_job(\n cmd=' '.join(cmd),\n job_name=job_name,\n parent_job_ids=parent_job_ids,\n num_array_jobs=num_array_jobs,\n use_gpu=use_gpu,\n job_queue=job_queue,\n job_def=job_def)\n parent_job_ids = [job_id]\n elif len(params) == 1 and not array_job_capable:\n num_array_jobs = None\n new_parent_job_ids = []\n for cmd in fn(num_splits):\n job_id = submit_job(\n cmd=' '.join(cmd),\n job_name=job_name,\n parent_job_ids=parent_job_ids,\n num_array_jobs=num_array_jobs,\n use_gpu=use_gpu,\n job_queue=job_queue,\n job_def=job_def)\n new_parent_job_ids.append(job_id)\n parent_job_ids = copy.copy(new_parent_job_ids)\n elif len(params) == 0:\n cmd = fn()\n num_array_jobs = None\n job_id = submit_job(\n cmd=' '.join(cmd),\n job_name=job_name,\n parent_job_ids=parent_job_ids,\n num_array_jobs=num_array_jobs,\n use_gpu=use_gpu,\n job_queue=job_queue,\n job_def=job_def)\n parent_job_ids = [job_id]\n else:\n if len(params) == 0:\n cmd = fn()\n elif len(params) == 1:\n cmd = fn(1)[0]\n num_array_jobs = 1\n job_id = submit_job(\n cmd=' '.join(cmd),\n job_name=job_name,\n parent_job_ids=parent_job_ids,\n num_array_jobs=num_array_jobs,\n use_gpu=use_gpu,\n job_queue=job_queue,\n job_def=job_def)\n parent_job_ids = [job_id]\n\n job_queue = None\n job_def = None\n\n def get_split_ind(self):\n return int(os.environ.get('AWS_BATCH_JOB_ARRAY_INDEX', 0))\n", "path": "rastervision_aws_batch/rastervision/aws_batch/aws_batch_runner.py"}]} | 2,904 | 274 |
gh_patches_debug_23323 | rasdani/github-patches | git_diff | facebookresearch__xformers-326 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Rotary embeddings convert queries and keys to float32 when using mixed precision training
Not sure if this is expected behaviour. The problem is that the cos_sin table in the RotaryEmbedding class is stored in float32 format, so the returned queries and keys get converted to float32 as well.
</issue>
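The behaviour follows PyTorch's type promotion: multiplying a float16 tensor by a float32 tensor yields float32, so a float32 cos/sin cache silently upcasts the rotated queries and keys under autocast. A minimal repro with arbitrary shapes, plus the direction of the fix (cast the cache to the input dtype):

```python
import torch

q = torch.randn(1, 1, 8, 16, dtype=torch.float16)   # query as produced under AMP
cos = torch.randn(1, 1, 8, 16)                       # float32 cos/sin cache

print((q * cos).dtype)              # torch.float32 - the query gets upcast
print((q * cos.to(q.dtype)).dtype)  # torch.float16 - casting the cache avoids it
```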
<code>
[start of xformers/components/positional_embedding/rotary.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
2 #
3 # This source code is licensed under the BSD license found in the
4 # LICENSE file in the root directory of this source tree.
5
6
7 # CREDITS: This implementation is inspired by GPT-NeoX https://github.com/EleutherAI/gpt-neox
8 # NOTE: Almost the same right now, moving parts to Triton is the next step
9
10 from typing import Tuple
11
12 import torch
13
14
15 def rotate_half(x):
16 x1, x2 = x.chunk(2, dim=-1)
17 return torch.cat((-x2, x1), dim=-1)
18
19
20 @torch.jit.script
21 def apply_rotary_pos_emb(x, cos, sin):
22 # NOTE: This could probably be moved to Triton
23
24 # Handle a possible sequence length mismatch in between q and k
25 cos = cos[:, :, : x.shape[-2], :]
26 sin = sin[:, :, : x.shape[-2], :]
27
28 return (x * cos) + (rotate_half(x) * sin)
29
30
31 class RotaryEmbedding(torch.nn.Module):
32 """
33 The rotary position embeddings from RoFormer_ (Su et. al).
34 A crucial insight from the method is that the query and keys are
35 transformed by rotation matrices which depend on the relative positions.
36
37 Other implementations are available in the Rotary Transformer repo_ and in
38 GPT-NeoX_, GPT-NeoX was an inspiration
39
40 .. _RoFormer: https://arxiv.org/abs/2104.09864
41 .. _repo: https://github.com/ZhuiyiTechnology/roformer
42 .. _GPT-NeoX: https://github.com/EleutherAI/gpt-neox
43
44
45 .. warning: Please note that this embedding is not registered on purpose, as it is transformative
46 (it does not create the embedding dimension) and will likely be picked up (imported) on a ad-hoc basis
47 """
48
49 def __init__(self, dim_model: int, *_, **__):
50 super().__init__()
51 # Generate and save the inverse frequency buffer (non trainable)
52 inv_freq = 1.0 / (10000 ** (torch.arange(0, dim_model, 2).float() / dim_model))
53 self.register_buffer("inv_freq", inv_freq)
54
55 self._seq_len_cached = None
56 self._cos_cached = None
57 self._sin_cached = None
58
59 def _update_cos_sin_tables(self, x, seq_dimension=1):
60 seq_len = x.shape[seq_dimension]
61
62 # Reset the tables if the sequence length has changed,
63 # or if we're on a new device (possibly due to tracing for instance)
64 if seq_len != self._seq_len_cached or self._cos_cached.device != x.device:
65 self._seq_len_cached = seq_len
66 t = torch.arange(x.shape[seq_dimension], device=x.device).type_as(
67 self.inv_freq
68 )
69 freqs = torch.einsum("i,j->ij", t, self.inv_freq)
70 emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
71
72 self._cos_cached = emb.cos()[None, None, :, :]
73 self._sin_cached = emb.sin()[None, None, :, :]
74
75 return self._cos_cached, self._sin_cached
76
77 def forward(
78 self, q: torch.Tensor, k: torch.Tensor
79 ) -> Tuple[torch.Tensor, torch.Tensor]:
80 self._cos_cached, self._sin_cached = self._update_cos_sin_tables(
81 k, seq_dimension=-2
82 )
83
84 return (
85 apply_rotary_pos_emb(q, self._cos_cached, self._sin_cached),
86 apply_rotary_pos_emb(k, self._cos_cached, self._sin_cached),
87 )
88
[end of xformers/components/positional_embedding/rotary.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/xformers/components/positional_embedding/rotary.py b/xformers/components/positional_embedding/rotary.py
--- a/xformers/components/positional_embedding/rotary.py
+++ b/xformers/components/positional_embedding/rotary.py
@@ -61,16 +61,20 @@
# Reset the tables if the sequence length has changed,
# or if we're on a new device (possibly due to tracing for instance)
- if seq_len != self._seq_len_cached or self._cos_cached.device != x.device:
+ if (
+ seq_len != self._seq_len_cached
+ or self._cos_cached.device != x.device
+ or self._cos_cached.dtype != x.dtype
+ ):
self._seq_len_cached = seq_len
- t = torch.arange(x.shape[seq_dimension], device=x.device).type_as(
- self.inv_freq
+ t = torch.arange(
+ x.shape[seq_dimension], device=x.device, dtype=torch.float32
)
- freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq.to(x.dtype))
emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
- self._cos_cached = emb.cos()[None, None, :, :]
- self._sin_cached = emb.sin()[None, None, :, :]
+ self._cos_cached = emb.cos()[None, None, :, :].to(x.dtype)
+ self._sin_cached = emb.sin()[None, None, :, :].to(x.dtype)
return self._cos_cached, self._sin_cached
| {"golden_diff": "diff --git a/xformers/components/positional_embedding/rotary.py b/xformers/components/positional_embedding/rotary.py\n--- a/xformers/components/positional_embedding/rotary.py\n+++ b/xformers/components/positional_embedding/rotary.py\n@@ -61,16 +61,20 @@\n \n # Reset the tables if the sequence length has changed,\n # or if we're on a new device (possibly due to tracing for instance)\n- if seq_len != self._seq_len_cached or self._cos_cached.device != x.device:\n+ if (\n+ seq_len != self._seq_len_cached\n+ or self._cos_cached.device != x.device\n+ or self._cos_cached.dtype != x.dtype\n+ ):\n self._seq_len_cached = seq_len\n- t = torch.arange(x.shape[seq_dimension], device=x.device).type_as(\n- self.inv_freq\n+ t = torch.arange(\n+ x.shape[seq_dimension], device=x.device, dtype=torch.float32\n )\n- freqs = torch.einsum(\"i,j->ij\", t, self.inv_freq)\n+ freqs = torch.einsum(\"i,j->ij\", t, self.inv_freq.to(x.dtype))\n emb = torch.cat((freqs, freqs), dim=-1).to(x.device)\n \n- self._cos_cached = emb.cos()[None, None, :, :]\n- self._sin_cached = emb.sin()[None, None, :, :]\n+ self._cos_cached = emb.cos()[None, None, :, :].to(x.dtype)\n+ self._sin_cached = emb.sin()[None, None, :, :].to(x.dtype)\n \n return self._cos_cached, self._sin_cached\n", "issue": "Rotary embeddings convert queries and keys to float32 when using mixed precision training\nNot sure, if this is expected behaviour. The problem is that the cos_sin table in the RotaryEmbedding class is stored in float32 format, thus the returned queries and keys get converted to float32 aswell.\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n\n# CREDITS: This implementation is inspired by GPT-NeoX https://github.com/EleutherAI/gpt-neox\n# NOTE: Almost the same right now, moving parts to Triton is the next step\n\nfrom typing import Tuple\n\nimport torch\n\n\ndef rotate_half(x):\n x1, x2 = x.chunk(2, dim=-1)\n return torch.cat((-x2, x1), dim=-1)\n\n\[email protected]\ndef apply_rotary_pos_emb(x, cos, sin):\n # NOTE: This could probably be moved to Triton\n\n # Handle a possible sequence length mismatch in between q and k\n cos = cos[:, :, : x.shape[-2], :]\n sin = sin[:, :, : x.shape[-2], :]\n\n return (x * cos) + (rotate_half(x) * sin)\n\n\nclass RotaryEmbedding(torch.nn.Module):\n \"\"\"\n The rotary position embeddings from RoFormer_ (Su et. al).\n A crucial insight from the method is that the query and keys are\n transformed by rotation matrices which depend on the relative positions.\n\n Other implementations are available in the Rotary Transformer repo_ and in\n GPT-NeoX_, GPT-NeoX was an inspiration\n\n .. _RoFormer: https://arxiv.org/abs/2104.09864\n .. _repo: https://github.com/ZhuiyiTechnology/roformer\n .. _GPT-NeoX: https://github.com/EleutherAI/gpt-neox\n\n\n .. 
warning: Please note that this embedding is not registered on purpose, as it is transformative\n (it does not create the embedding dimension) and will likely be picked up (imported) on a ad-hoc basis\n \"\"\"\n\n def __init__(self, dim_model: int, *_, **__):\n super().__init__()\n # Generate and save the inverse frequency buffer (non trainable)\n inv_freq = 1.0 / (10000 ** (torch.arange(0, dim_model, 2).float() / dim_model))\n self.register_buffer(\"inv_freq\", inv_freq)\n\n self._seq_len_cached = None\n self._cos_cached = None\n self._sin_cached = None\n\n def _update_cos_sin_tables(self, x, seq_dimension=1):\n seq_len = x.shape[seq_dimension]\n\n # Reset the tables if the sequence length has changed,\n # or if we're on a new device (possibly due to tracing for instance)\n if seq_len != self._seq_len_cached or self._cos_cached.device != x.device:\n self._seq_len_cached = seq_len\n t = torch.arange(x.shape[seq_dimension], device=x.device).type_as(\n self.inv_freq\n )\n freqs = torch.einsum(\"i,j->ij\", t, self.inv_freq)\n emb = torch.cat((freqs, freqs), dim=-1).to(x.device)\n\n self._cos_cached = emb.cos()[None, None, :, :]\n self._sin_cached = emb.sin()[None, None, :, :]\n\n return self._cos_cached, self._sin_cached\n\n def forward(\n self, q: torch.Tensor, k: torch.Tensor\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n self._cos_cached, self._sin_cached = self._update_cos_sin_tables(\n k, seq_dimension=-2\n )\n\n return (\n apply_rotary_pos_emb(q, self._cos_cached, self._sin_cached),\n apply_rotary_pos_emb(k, self._cos_cached, self._sin_cached),\n )\n", "path": "xformers/components/positional_embedding/rotary.py"}]} | 1,616 | 383 |
gh_patches_debug_13792 | rasdani/github-patches | git_diff | kubeflow__pipelines-7985 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[bug] TFJob launcher pipeline task fails when `delete_finished_tfjob` flag is `True`
https://github.com/kubeflow/pipelines/blob/127dab4d4671849d596c455f4619ea807d22f6ea/components/kubeflow/launcher/component.yaml#L19
When the `delete_finished_tfjob` flag is `True`, the TFJob launcher task fails with the error below.
```
Traceback (most recent call last):
File "/ml/launch_tfjob.py", line 136, in <module>
main()
File "/ml/launch_tfjob.py", line 133, in main
tfjob.delete(args.name, args.namespace)
File "/ml/launch_crd.py", line 115, in delete
body)
TypeError: delete_namespaced_custom_object() takes exactly 6 arguments (7 given)
```
I think it's some kind of kubernetes client SDK version issue in the `nikenano/launchernew:latest` container image.
</issue>
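The arity error usually means the installed `kubernetes` client generation differs from the one the launcher was written against; later releases dropped the positional delete-body parameter from `CustomObjectsApi.delete_namespaced_custom_object`. Passing everything by keyword, as sketched below, should work on either signature. The foreground propagation body mirrors the launcher code and is otherwise an assumption.

```python
from kubernetes import client as k8s_client

def delete_custom_resource(api: k8s_client.CustomObjectsApi, group: str,
                           version: str, plural: str, name: str, namespace: str):
    """Delete a custom resource using keyword arguments only, so the call
    does not depend on whether the client still takes a positional body."""
    body = {"propagationPolicy": "Foreground"}
    return api.delete_namespaced_custom_object(
        group=group,
        version=version,
        namespace=namespace,
        plural=plural,
        name=name,
        body=body,
    )
```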
<code>
[start of components/kubeflow/common/launch_crd.py]
1 # Copyright 2019 kubeflow.org.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 #
6 # http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 import datetime
15 import json
16 import logging
17 import multiprocessing
18 import time
19
20 from kubernetes import client as k8s_client
21 from kubernetes.client import rest
22
23 logger = logging.getLogger(__name__)
24
25 class K8sCR(object):
26 def __init__(self, group, plural, version, client):
27 self.group = group
28 self.plural = plural
29 self.version = version
30 self.client = k8s_client.CustomObjectsApi(client)
31
32 def wait_for_condition(self,
33 namespace,
34 name,
35 expected_conditions=[],
36 timeout=datetime.timedelta(days=365),
37 polling_interval=datetime.timedelta(seconds=30),
38 status_callback=None):
39 """Waits until any of the specified conditions occur.
40 Args:
41 namespace: namespace for the CR.
42 name: Name of the CR.
43 expected_conditions: A list of conditions. Function waits until any of the
44 supplied conditions is reached.
45 timeout: How long to wait for the CR.
46 polling_interval: How often to poll for the status of the CR.
47 status_callback: (Optional): Callable. If supplied this callable is
48 invoked after we poll the CR. Callable takes a single argument which
49 is the CR.
50 """
51 end_time = datetime.datetime.now() + timeout
52 while True:
53 try:
54 results = self.client.get_namespaced_custom_object(
55 self.group, self.version, namespace, self.plural, name)
56 except Exception as e:
57 logger.error("There was a problem waiting for %s/%s %s in namespace %s; Exception: %s",
58 self.group, self.plural, name, namespace, e)
59 raise
60
61 if results:
62 if status_callback:
63 status_callback(results)
64 expected, condition = self.is_expected_conditions(results, expected_conditions)
65 if expected:
66 logger.info("%s/%s %s in namespace %s has reached the expected condition: %s.",
67 self.group, self.plural, name, namespace, condition)
68 return results
69 else:
70 if condition:
71 logger.info("Current condition of %s/%s %s in namespace %s is %s.",
72 self.group, self.plural, name, namespace, condition)
73
74 if datetime.datetime.now() + polling_interval > end_time:
75 raise Exception(
76 "Timeout waiting for {0}/{1} {2} in namespace {3} to enter one of the "
77 "conditions {4}.".format(self.group, self.plural, name, namespace, expected_conditions))
78
79 time.sleep(polling_interval.seconds)
80
81 def is_expected_conditions(self, inst, expected_conditions):
82 conditions = inst.get('status', {}).get("conditions")
83 if not conditions:
84 return False, ""
85 if conditions[-1]["type"] in expected_conditions and conditions[-1]["status"] == "True":
86 return True, conditions[-1]["type"]
87 else:
88 return False, conditions[-1]["type"]
89
90 def create(self, spec):
91 """Create a CR.
92 Args:
93 spec: The spec for the CR.
94 """
95 try:
96 # Create a Resource
97 namespace = spec["metadata"].get("namespace", "default")
98 logger.info("Creating %s/%s %s in namespace %s.",
99 self.group, self.plural, spec["metadata"]["name"], namespace)
100 api_response = self.client.create_namespaced_custom_object(
101 self.group, self.version, namespace, self.plural, spec)
102 logger.info("Created %s/%s %s in namespace %s.",
103 self.group, self.plural, spec["metadata"]["name"], namespace)
104 return api_response
105 except rest.ApiException as e:
106 self._log_and_raise_exception(e, "create")
107
108 def delete(self, name, namespace):
109 try:
110 body = {
111 # Set garbage collection so that CR won't be deleted until all
112 # owned references are deleted.
113 "propagationPolicy": "Foreground",
114 }
115 logger.info("Deleteing %s/%s %s in namespace %s.",
116 self.group, self.plural, name, namespace)
117 api_response = self.client.delete_namespaced_custom_object(
118 self.group,
119 self.version,
120 namespace,
121 self.plural,
122 name,
123 body)
124 logger.info("Deleted %s/%s %s in namespace %s.",
125 self.group, self.plural, name, namespace)
126 return api_response
127 except rest.ApiException as e:
128 self._log_and_raise_exception(e, "delete")
129
130 def _log_and_raise_exception(self, ex, action):
131 message = ""
132 if ex.message:
133 message = ex.message
134 if ex.body:
135 try:
136 body = json.loads(ex.body)
137 message = body.get("message")
138 except ValueError:
139 logger.error("Exception when %s %s/%s: %s", action, self.group, self.plural, ex.body)
140 raise
141
142 logger.error("Exception when %s %s/%s: %s", action, self.group, self.plural, ex.body)
143 raise ex
144
145
[end of components/kubeflow/common/launch_crd.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/components/kubeflow/common/launch_crd.py b/components/kubeflow/common/launch_crd.py
--- a/components/kubeflow/common/launch_crd.py
+++ b/components/kubeflow/common/launch_crd.py
@@ -115,12 +115,12 @@
logger.info("Deleteing %s/%s %s in namespace %s.",
self.group, self.plural, name, namespace)
api_response = self.client.delete_namespaced_custom_object(
- self.group,
- self.version,
- namespace,
- self.plural,
- name,
- body)
+ group=self.group,
+ version=self.version,
+ namespace=namespace,
+ plural=self.plural,
+ name=name,
+ body=body)
logger.info("Deleted %s/%s %s in namespace %s.",
self.group, self.plural, name, namespace)
return api_response
| {"golden_diff": "diff --git a/components/kubeflow/common/launch_crd.py b/components/kubeflow/common/launch_crd.py\n--- a/components/kubeflow/common/launch_crd.py\n+++ b/components/kubeflow/common/launch_crd.py\n@@ -115,12 +115,12 @@\n logger.info(\"Deleteing %s/%s %s in namespace %s.\",\n self.group, self.plural, name, namespace)\n api_response = self.client.delete_namespaced_custom_object(\n- self.group,\n- self.version,\n- namespace,\n- self.plural,\n- name,\n- body)\n+ group=self.group,\n+ version=self.version,\n+ namespace=namespace,\n+ plural=self.plural,\n+ name=name,\n+ body=body)\n logger.info(\"Deleted %s/%s %s in namespace %s.\",\n self.group, self.plural, name, namespace)\n return api_response\n", "issue": "[bug] TFJob launcher pipeline task fails when `delete_finished_tfjob` flag is `True`\nhttps://github.com/kubeflow/pipelines/blob/127dab4d4671849d596c455f4619ea807d22f6ea/components/kubeflow/launcher/component.yaml#L19\r\n\r\nWhen `delete_finished_tfjob` flag is `True`, TFJob launcher task fails with error below.\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/ml/launch_tfjob.py\", line 136, in <module>\r\n main()\r\n File \"/ml/launch_tfjob.py\", line 133, in main\r\n tfjob.delete(args.name, args.namespace)\r\n File \"/ml/launch_crd.py\", line 115, in delete\r\n body)\r\nTypeError: delete_namespaced_custom_object() takes exactly 6 arguments (7 given)\r\n```\r\n\r\nI think it's some kind of kubernetes client SDK version issue in `nikenano/launchernew:latest` container image.\n", "before_files": [{"content": "# Copyright 2019 kubeflow.org.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport json\nimport logging\nimport multiprocessing\nimport time\n\nfrom kubernetes import client as k8s_client\nfrom kubernetes.client import rest\n\nlogger = logging.getLogger(__name__)\n\nclass K8sCR(object):\n def __init__(self, group, plural, version, client):\n self.group = group\n self.plural = plural\n self.version = version\n self.client = k8s_client.CustomObjectsApi(client)\n\n def wait_for_condition(self,\n namespace,\n name,\n expected_conditions=[],\n timeout=datetime.timedelta(days=365),\n polling_interval=datetime.timedelta(seconds=30),\n status_callback=None):\n \"\"\"Waits until any of the specified conditions occur.\n Args:\n namespace: namespace for the CR.\n name: Name of the CR.\n expected_conditions: A list of conditions. Function waits until any of the\n supplied conditions is reached.\n timeout: How long to wait for the CR.\n polling_interval: How often to poll for the status of the CR.\n status_callback: (Optional): Callable. If supplied this callable is\n invoked after we poll the CR. 
Callable takes a single argument which\n is the CR.\n \"\"\"\n end_time = datetime.datetime.now() + timeout\n while True:\n try:\n results = self.client.get_namespaced_custom_object(\n self.group, self.version, namespace, self.plural, name)\n except Exception as e:\n logger.error(\"There was a problem waiting for %s/%s %s in namespace %s; Exception: %s\",\n self.group, self.plural, name, namespace, e)\n raise\n\n if results:\n if status_callback:\n status_callback(results)\n expected, condition = self.is_expected_conditions(results, expected_conditions)\n if expected:\n logger.info(\"%s/%s %s in namespace %s has reached the expected condition: %s.\",\n self.group, self.plural, name, namespace, condition)\n return results\n else:\n if condition:\n logger.info(\"Current condition of %s/%s %s in namespace %s is %s.\",\n self.group, self.plural, name, namespace, condition)\n\n if datetime.datetime.now() + polling_interval > end_time:\n raise Exception(\n \"Timeout waiting for {0}/{1} {2} in namespace {3} to enter one of the \"\n \"conditions {4}.\".format(self.group, self.plural, name, namespace, expected_conditions))\n\n time.sleep(polling_interval.seconds)\n\n def is_expected_conditions(self, inst, expected_conditions):\n conditions = inst.get('status', {}).get(\"conditions\")\n if not conditions:\n return False, \"\"\n if conditions[-1][\"type\"] in expected_conditions and conditions[-1][\"status\"] == \"True\":\n return True, conditions[-1][\"type\"]\n else:\n return False, conditions[-1][\"type\"]\n\n def create(self, spec):\n \"\"\"Create a CR.\n Args:\n spec: The spec for the CR.\n \"\"\"\n try:\n # Create a Resource\n namespace = spec[\"metadata\"].get(\"namespace\", \"default\")\n logger.info(\"Creating %s/%s %s in namespace %s.\",\n self.group, self.plural, spec[\"metadata\"][\"name\"], namespace)\n api_response = self.client.create_namespaced_custom_object(\n self.group, self.version, namespace, self.plural, spec)\n logger.info(\"Created %s/%s %s in namespace %s.\",\n self.group, self.plural, spec[\"metadata\"][\"name\"], namespace)\n return api_response\n except rest.ApiException as e:\n self._log_and_raise_exception(e, \"create\")\n\n def delete(self, name, namespace):\n try:\n body = {\n # Set garbage collection so that CR won't be deleted until all\n # owned references are deleted.\n \"propagationPolicy\": \"Foreground\",\n }\n logger.info(\"Deleteing %s/%s %s in namespace %s.\",\n self.group, self.plural, name, namespace)\n api_response = self.client.delete_namespaced_custom_object(\n self.group,\n self.version,\n namespace,\n self.plural,\n name,\n body)\n logger.info(\"Deleted %s/%s %s in namespace %s.\",\n self.group, self.plural, name, namespace)\n return api_response\n except rest.ApiException as e:\n self._log_and_raise_exception(e, \"delete\")\n\n def _log_and_raise_exception(self, ex, action):\n message = \"\"\n if ex.message:\n message = ex.message\n if ex.body:\n try:\n body = json.loads(ex.body)\n message = body.get(\"message\")\n except ValueError:\n logger.error(\"Exception when %s %s/%s: %s\", action, self.group, self.plural, ex.body)\n raise\n\n logger.error(\"Exception when %s %s/%s: %s\", action, self.group, self.plural, ex.body)\n raise ex\n\n", "path": "components/kubeflow/common/launch_crd.py"}]} | 2,317 | 210 |
gh_patches_debug_18196 | rasdani/github-patches | git_diff | joke2k__faker-1443 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Adding ABA Routing Number to Bank Provider
* Faker version:8.1.2
Is there any interest in adding ABA routing numbers to the Bank provider? https://en.wikipedia.org/wiki/ABA_routing_transit_number
This would be helpful for my use case and have code to generate but wanted to check before putting more effort in.
</issue>
<code>
[start of faker/providers/bank/__init__.py]
1 import re
2 import string
3
4 from string import ascii_uppercase
5
6 from .. import BaseProvider
7
8 localized = True
9 default_locale = 'en_GB'
10
11
12 class Provider(BaseProvider):
13 """Implement default bank provider for Faker.
14
15 .. important::
16 Bank codes, account numbers, and other ID's generated by this provider
17 are only valid in form, i.e. they conform to some standard/format, are
18 of the expected lengths, and have valid checksums (where applicable).
19 Results generated that turn out to be valid in real life are purely
20 coincidental.
21
22 Sources:
23
24 - https://en.wikipedia.org/wiki/International_Bank_Account_Number
25 - https://www.theswiftcodes.com/swift-code-checker/
26 """
27
28 ALPHA = {c: str(ord(c) % 55) for c in string.ascii_uppercase}
29 bban_format = '????#############'
30 country_code = 'GB'
31
32 def bank_country(self):
33 """Generate the bank provider's ISO 3166-1 alpha-2 country code."""
34 return self.country_code
35
36 def bban(self):
37 """Generate a Basic Bank Account Number (BBAN)."""
38 temp = re.sub(r'\?',
39 lambda x: self.random_element(ascii_uppercase),
40 self.bban_format)
41 return self.numerify(temp)
42
43 def iban(self):
44 """Generate an International Bank Account Number (IBAN)."""
45 bban = self.bban()
46
47 check = bban + self.country_code + '00'
48 check = int(''.join(self.ALPHA.get(c, c) for c in check))
49 check = 98 - (check % 97)
50 check = str(check).zfill(2)
51
52 return self.country_code + check + bban
53
54 def swift8(self, use_dataset=False):
55 """Generate an 8-digit SWIFT code.
56
57 This method uses |swift| under the hood with the ``length`` argument set
58 to ``8`` and with the ``primary`` argument omitted. All 8-digit SWIFT
59 codes already refer to the primary branch/office.
60
61 :sample:
62 :sample: use_dataset=True
63 """
64 return self.swift(length=8, use_dataset=use_dataset)
65
66 def swift11(self, primary=None, use_dataset=False):
67 """Generate an 11-digit SWIFT code.
68
69 This method uses |swift| under the hood with the ``length`` argument set
70 to ``11``. If ``primary`` is set to ``True``, the SWIFT code will always
71 end with ``'XXX'``. All 11-digit SWIFT codes use this convention to
72 refer to the primary branch/office.
73
74 :sample:
75 :sample: use_dataset=True
76 """
77 return self.swift(length=11, primary=primary, use_dataset=use_dataset)
78
79 def swift(self, length=None, primary=None, use_dataset=False):
80 """Generate a SWIFT code.
81
82 SWIFT codes, reading from left to right, are composed of a 4 alphabet
83 character bank code, a 2 alphabet character country code, a 2
84 alphanumeric location code, and an optional 3 alphanumeric branch code.
85 This means SWIFT codes can only have 8 or 11 characters, so the value of
86 ``length`` can only be ``None`` or the integers ``8`` or ``11``. If the
87 value is ``None``, then a value of ``8`` or ``11`` will randomly be
88 assigned.
89
90 Because all 8-digit SWIFT codes already refer to the primary branch or
91 office, the ``primary`` argument only has an effect if the value of
92 ``length`` is ``11``. If ``primary`` is ``True`` and ``length`` is
93 ``11``, the 11-digit SWIFT codes generated will always end in ``'XXX'``
94 to denote that they belong to primary branches/offices.
95
96 For extra authenticity, localized providers may opt to include SWIFT
97 bank codes, location codes, and branch codes used in their respective
98 locales. If ``use_dataset`` is ``True``, this method will generate SWIFT
99 codes based on those locale-specific codes if included. If those codes
100 were not included, then it will behave as if ``use_dataset`` were
101 ``False``, and in that mode, all those codes will just be randomly
102 generated as per the specification.
103
104 :sample:
105 :sample: length=8
106 :sample: length=8, use_dataset=True
107 :sample: length=11
108 :sample: length=11, primary=True
109 :sample: length=11, use_dataset=True
110 :sample: length=11, primary=True, use_dataset=True
111 """
112 if length is None:
113 length = self.random_element((8, 11))
114 if length not in (8, 11):
115 raise AssertionError('length can only be 8 or 11')
116
117 if use_dataset and hasattr(self, 'swift_bank_codes'):
118 bank_code = self.random_element(self.swift_bank_codes)
119 else:
120 bank_code = self.lexify('????', letters=string.ascii_uppercase)
121
122 if use_dataset and hasattr(self, 'swift_location_codes'):
123 location_code = self.random_element(self.swift_location_codes)
124 else:
125 location_code = self.lexify('??', letters=string.ascii_uppercase + string.digits)
126
127 if length == 8:
128 return bank_code + self.country_code + location_code
129
130 if primary:
131 branch_code = 'XXX'
132 elif use_dataset and hasattr(self, 'swift_branch_codes'):
133 branch_code = self.random_element(self.swift_branch_codes)
134 else:
135 branch_code = self.lexify('???', letters=string.ascii_uppercase + string.digits)
136
137 return bank_code + self.country_code + location_code + branch_code
138
[end of faker/providers/bank/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/faker/providers/bank/__init__.py b/faker/providers/bank/__init__.py
--- a/faker/providers/bank/__init__.py
+++ b/faker/providers/bank/__init__.py
@@ -1,6 +1,7 @@
import re
import string
+from math import ceil
from string import ascii_uppercase
from .. import BaseProvider
@@ -29,6 +30,19 @@
bban_format = '????#############'
country_code = 'GB'
+ def aba(self):
+ """Generate an ABA routing transit number."""
+ fed_num = self.random_int(min=1, max=12)
+ rand = self.numerify('######')
+ aba = f"{fed_num:02}{rand}"
+
+ # calculate check digit
+ d = [int(n) for n in aba]
+ chk_digit = 3*(d[0] + d[3] + d[6]) + 7*(d[1] + d[4] + d[7]) + d[2] + d[5]
+ chk_digit = ceil(chk_digit/10)*10 - chk_digit
+
+ return f"{aba}{chk_digit}"
+
def bank_country(self):
"""Generate the bank provider's ISO 3166-1 alpha-2 country code."""
return self.country_code
| {"golden_diff": "diff --git a/faker/providers/bank/__init__.py b/faker/providers/bank/__init__.py\n--- a/faker/providers/bank/__init__.py\n+++ b/faker/providers/bank/__init__.py\n@@ -1,6 +1,7 @@\n import re\n import string\n \n+from math import ceil\n from string import ascii_uppercase\n \n from .. import BaseProvider\n@@ -29,6 +30,19 @@\n bban_format = '????#############'\n country_code = 'GB'\n \n+ def aba(self):\n+ \"\"\"Generate an ABA routing transit number.\"\"\"\n+ fed_num = self.random_int(min=1, max=12)\n+ rand = self.numerify('######')\n+ aba = f\"{fed_num:02}{rand}\"\n+\n+ # calculate check digit\n+ d = [int(n) for n in aba]\n+ chk_digit = 3*(d[0] + d[3] + d[6]) + 7*(d[1] + d[4] + d[7]) + d[2] + d[5]\n+ chk_digit = ceil(chk_digit/10)*10 - chk_digit\n+\n+ return f\"{aba}{chk_digit}\"\n+\n def bank_country(self):\n \"\"\"Generate the bank provider's ISO 3166-1 alpha-2 country code.\"\"\"\n return self.country_code\n", "issue": "Adding ABA Routing Number to Bank Provider\n* Faker version:8.1.2\r\n\r\n\r\nIs there any interest in adding ABA routing numbers to the Bank provider? https://en.wikipedia.org/wiki/ABA_routing_transit_number\r\n\r\nThis would be helpful for my use case and have code to generate but wanted to check before putting more effort in.\n", "before_files": [{"content": "import re\nimport string\n\nfrom string import ascii_uppercase\n\nfrom .. import BaseProvider\n\nlocalized = True\ndefault_locale = 'en_GB'\n\n\nclass Provider(BaseProvider):\n \"\"\"Implement default bank provider for Faker.\n\n .. important::\n Bank codes, account numbers, and other ID's generated by this provider\n are only valid in form, i.e. they conform to some standard/format, are\n of the expected lengths, and have valid checksums (where applicable).\n Results generated that turn out to be valid in real life are purely\n coincidental.\n\n Sources:\n\n - https://en.wikipedia.org/wiki/International_Bank_Account_Number\n - https://www.theswiftcodes.com/swift-code-checker/\n \"\"\"\n\n ALPHA = {c: str(ord(c) % 55) for c in string.ascii_uppercase}\n bban_format = '????#############'\n country_code = 'GB'\n\n def bank_country(self):\n \"\"\"Generate the bank provider's ISO 3166-1 alpha-2 country code.\"\"\"\n return self.country_code\n\n def bban(self):\n \"\"\"Generate a Basic Bank Account Number (BBAN).\"\"\"\n temp = re.sub(r'\\?',\n lambda x: self.random_element(ascii_uppercase),\n self.bban_format)\n return self.numerify(temp)\n\n def iban(self):\n \"\"\"Generate an International Bank Account Number (IBAN).\"\"\"\n bban = self.bban()\n\n check = bban + self.country_code + '00'\n check = int(''.join(self.ALPHA.get(c, c) for c in check))\n check = 98 - (check % 97)\n check = str(check).zfill(2)\n\n return self.country_code + check + bban\n\n def swift8(self, use_dataset=False):\n \"\"\"Generate an 8-digit SWIFT code.\n\n This method uses |swift| under the hood with the ``length`` argument set\n to ``8`` and with the ``primary`` argument omitted. All 8-digit SWIFT\n codes already refer to the primary branch/office.\n\n :sample:\n :sample: use_dataset=True\n \"\"\"\n return self.swift(length=8, use_dataset=use_dataset)\n\n def swift11(self, primary=None, use_dataset=False):\n \"\"\"Generate an 11-digit SWIFT code.\n\n This method uses |swift| under the hood with the ``length`` argument set\n to ``11``. If ``primary`` is set to ``True``, the SWIFT code will always\n end with ``'XXX'``. 
All 11-digit SWIFT codes use this convention to\n refer to the primary branch/office.\n\n :sample:\n :sample: use_dataset=True\n \"\"\"\n return self.swift(length=11, primary=primary, use_dataset=use_dataset)\n\n def swift(self, length=None, primary=None, use_dataset=False):\n \"\"\"Generate a SWIFT code.\n\n SWIFT codes, reading from left to right, are composed of a 4 alphabet\n character bank code, a 2 alphabet character country code, a 2\n alphanumeric location code, and an optional 3 alphanumeric branch code.\n This means SWIFT codes can only have 8 or 11 characters, so the value of\n ``length`` can only be ``None`` or the integers ``8`` or ``11``. If the\n value is ``None``, then a value of ``8`` or ``11`` will randomly be\n assigned.\n\n Because all 8-digit SWIFT codes already refer to the primary branch or\n office, the ``primary`` argument only has an effect if the value of\n ``length`` is ``11``. If ``primary`` is ``True`` and ``length`` is\n ``11``, the 11-digit SWIFT codes generated will always end in ``'XXX'``\n to denote that they belong to primary branches/offices.\n\n For extra authenticity, localized providers may opt to include SWIFT\n bank codes, location codes, and branch codes used in their respective\n locales. If ``use_dataset`` is ``True``, this method will generate SWIFT\n codes based on those locale-specific codes if included. If those codes\n were not included, then it will behave as if ``use_dataset`` were\n ``False``, and in that mode, all those codes will just be randomly\n generated as per the specification.\n\n :sample:\n :sample: length=8\n :sample: length=8, use_dataset=True\n :sample: length=11\n :sample: length=11, primary=True\n :sample: length=11, use_dataset=True\n :sample: length=11, primary=True, use_dataset=True\n \"\"\"\n if length is None:\n length = self.random_element((8, 11))\n if length not in (8, 11):\n raise AssertionError('length can only be 8 or 11')\n\n if use_dataset and hasattr(self, 'swift_bank_codes'):\n bank_code = self.random_element(self.swift_bank_codes)\n else:\n bank_code = self.lexify('????', letters=string.ascii_uppercase)\n\n if use_dataset and hasattr(self, 'swift_location_codes'):\n location_code = self.random_element(self.swift_location_codes)\n else:\n location_code = self.lexify('??', letters=string.ascii_uppercase + string.digits)\n\n if length == 8:\n return bank_code + self.country_code + location_code\n\n if primary:\n branch_code = 'XXX'\n elif use_dataset and hasattr(self, 'swift_branch_codes'):\n branch_code = self.random_element(self.swift_branch_codes)\n else:\n branch_code = self.lexify('???', letters=string.ascii_uppercase + string.digits)\n\n return bank_code + self.country_code + location_code + branch_code\n", "path": "faker/providers/bank/__init__.py"}]} | 2,207 | 308 |
gh_patches_debug_5161 | rasdani/github-patches | git_diff | kubeflow__pipelines-6683 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[sdk] dependency conflict with tensorflow 2.6.0 and seldon-core
### Environment
* KFP version: 1.7
* KFP SDK version: 1.8.3
* All dependencies version:
```
[~]$ pip list | grep kfp
kfp 1.8.3
kfp-pipeline-spec 0.1.11
kfp-server-api 1.7.0
```
kfp==1.8.3 collides with tensorflow==2.6 because it requires
https://github.com/kubeflow/pipelines/blob/220d79df66e31bbd93c409fb361e0463bde4aeac/sdk/python/setup.py#L56
while tensorflow needs
```
Warning!!! Possibly conflicting dependencies found:
* tensorflow==2.6.0
- typing-extensions [required: ~=3.7.4, installed: 3.10.0.2]
```
https://github.com/tensorflow/tensorflow/blob/421fba8888bb8f8724bc2e35ca2fdcde16e1bfe5/tensorflow/tools/pip_package/setup.py#L90
is `'typing-extensions>=3.7.4,<4;python_version<"3.9"'` not enough?
The same goes for seldon-core==1.11.* and package click and absl-py
```
ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
kfp 1.8.2 requires absl-py<=0.11,>=0.9, but you have absl-py 0.13.0 which is incompatible.
kfp 1.8.2 requires click<8,>=7.1.1, but you have click 8.0.1 which is incompatible.
kfp 1.8.2 requires typing-extensions<4,>=3.10.0.2, but you have typing-extensions 3.7.4.3 which is incompatible.
```
</issue>
<code>
[start of sdk/python/setup.py]
1 # Copyright 2018 The Kubeflow Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import re
17
18 from setuptools import setup
19
20 NAME = 'kfp'
21 #VERSION = .... Change the version in kfp/__init__.py
22
23 # NOTICE, after any updates to the following, ./requirements.in should be updated
24 # accordingly.
25 REQUIRES = [
26 'absl-py>=0.9,<=0.11',
27 'PyYAML>=5.3,<6',
28 # `Blob.from_string` was introduced in google-cloud-storage 1.20.0
29 # https://github.com/googleapis/python-storage/blob/master/CHANGELOG.md#1200
30 'google-cloud-storage>=1.20.0,<2',
31 'kubernetes>=8.0.0,<19',
32 # google-api-python-client v2 doesn't work for private dicovery by default:
33 # https://github.com/googleapis/google-api-python-client/issues/1225#issuecomment-791058235
34 'google-api-python-client>=1.7.8,<2',
35 'google-auth>=1.6.1,<2',
36 'requests-toolbelt>=0.8.0,<1',
37 'cloudpickle>=1.3.0,<2',
38 # Update the upper version whenever a new major version of the
39 # kfp-server-api package is released.
40 # Update the lower version when kfp sdk depends on new apis/fields in
41 # kfp-server-api.
42 # Note, please also update ./requirements.in
43 'kfp-server-api>=1.1.2,<2.0.0',
44 'jsonschema>=3.0.1,<4',
45 'tabulate>=0.8.6,<1',
46 'click>=7.1.1,<8',
47 'Deprecated>=1.2.7,<2',
48 'strip-hints>=0.1.8,<1',
49 'docstring-parser>=0.7.3,<1',
50 'kfp-pipeline-spec>=0.1.10,<0.2.0',
51 'fire>=0.3.1,<1',
52 'protobuf>=3.13.0,<4',
53 'uritemplate>=3.0.1,<4',
54 # Standard library backports
55 'dataclasses;python_version<"3.7"',
56 'typing-extensions>=3.10.0.2,<4;python_version<"3.9"',
57 'pydantic>=1.8.2,<2',
58 ]
59
60 TESTS_REQUIRE = [
61 'frozendict',
62 ]
63
64
65 def find_version(*file_path_parts):
66 here = os.path.abspath(os.path.dirname(__file__))
67 with open(os.path.join(here, *file_path_parts), 'r') as fp:
68 version_file_text = fp.read()
69
70 version_match = re.search(
71 r"^__version__ = ['\"]([^'\"]*)['\"]",
72 version_file_text,
73 re.M,
74 )
75 if version_match:
76 return version_match.group(1)
77
78 raise RuntimeError('Unable to find version string.')
79
80
81 setup(
82 name=NAME,
83 version=find_version('kfp', '__init__.py'),
84 description='KubeFlow Pipelines SDK',
85 author='The Kubeflow Authors',
86 url="https://github.com/kubeflow/pipelines",
87 project_urls={
88 "Documentation": "https://kubeflow-pipelines.readthedocs.io/en/stable/",
89 "Bug Tracker": "https://github.com/kubeflow/pipelines/issues",
90 "Source": "https://github.com/kubeflow/pipelines/tree/master/sdk",
91 "Changelog": "https://github.com/kubeflow/pipelines/blob/master/sdk/RELEASE.md",
92 },
93 install_requires=REQUIRES,
94 tests_require=TESTS_REQUIRE,
95 packages=[
96 'kfp',
97 'kfp.auth',
98 'kfp.cli',
99 'kfp.cli.diagnose_me',
100 'kfp.compiler',
101 'kfp.components',
102 'kfp.components.structures',
103 'kfp.containers',
104 'kfp.dsl',
105 'kfp.dsl.extensions',
106 'kfp.notebook',
107 'kfp.v2',
108 'kfp.v2.compiler',
109 'kfp.v2.components',
110 'kfp.v2.components.types',
111 'kfp.v2.components.experimental',
112 'kfp.v2.dsl',
113 'kfp.v2.google.client',
114 'kfp.v2.google.experimental',
115 ],
116 classifiers=[
117 'Intended Audience :: Developers',
118 'Intended Audience :: Education',
119 'Intended Audience :: Science/Research',
120 'License :: OSI Approved :: Apache Software License',
121 'Programming Language :: Python :: 3',
122 'Programming Language :: Python :: 3.6',
123 'Programming Language :: Python :: 3.7',
124 'Programming Language :: Python :: 3.8',
125 'Programming Language :: Python :: 3.9',
126 'Topic :: Scientific/Engineering',
127 'Topic :: Scientific/Engineering :: Artificial Intelligence',
128 'Topic :: Software Development',
129 'Topic :: Software Development :: Libraries',
130 'Topic :: Software Development :: Libraries :: Python Modules',
131 ],
132 python_requires='>=3.6.1',
133 include_package_data=True,
134 entry_points={
135 'console_scripts': [
136 'dsl-compile = kfp.compiler.main:main',
137 'dsl-compile-v2 = kfp.v2.compiler.main:main',
138 'kfp=kfp.__main__:main'
139 ]
140 })
141
[end of sdk/python/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sdk/python/setup.py b/sdk/python/setup.py
--- a/sdk/python/setup.py
+++ b/sdk/python/setup.py
@@ -51,10 +51,10 @@
'fire>=0.3.1,<1',
'protobuf>=3.13.0,<4',
'uritemplate>=3.0.1,<4',
+ 'pydantic>=1.8.2,<2',
# Standard library backports
'dataclasses;python_version<"3.7"',
- 'typing-extensions>=3.10.0.2,<4;python_version<"3.9"',
- 'pydantic>=1.8.2,<2',
+ 'typing-extensions>=3.7.4,<4;python_version<"3.9"',
]
TESTS_REQUIRE = [
| {"golden_diff": "diff --git a/sdk/python/setup.py b/sdk/python/setup.py\n--- a/sdk/python/setup.py\n+++ b/sdk/python/setup.py\n@@ -51,10 +51,10 @@\n 'fire>=0.3.1,<1',\n 'protobuf>=3.13.0,<4',\n 'uritemplate>=3.0.1,<4',\n+ 'pydantic>=1.8.2,<2',\n # Standard library backports\n 'dataclasses;python_version<\"3.7\"',\n- 'typing-extensions>=3.10.0.2,<4;python_version<\"3.9\"',\n- 'pydantic>=1.8.2,<2',\n+ 'typing-extensions>=3.7.4,<4;python_version<\"3.9\"',\n ]\n \n TESTS_REQUIRE = [\n", "issue": "[sdk] dependency conflict with tensorflow 2.6.0 and seldon-core\n### Environment\r\n\r\n* KFP version: 1.7\r\n* KFP SDK version: 1.8.3\r\n\r\n\r\n* All dependencies version:\r\n```\r\n[~]$ pip list | grep kfp\r\nkfp 1.8.3\r\nkfp-pipeline-spec 0.1.11\r\nkfp-server-api 1.7.0\r\n```\r\nkfp==1.8.3 collides with tensorflow==2.6 because it requires \r\n\r\nhttps://github.com/kubeflow/pipelines/blob/220d79df66e31bbd93c409fb361e0463bde4aeac/sdk/python/setup.py#L56\r\n\r\nwhile tensorflow needs\r\n```\r\nWarning!!! Possibly conflicting dependencies found:\r\n* tensorflow==2.6.0\r\n - typing-extensions [required: ~=3.7.4, installed: 3.10.0.2]\r\n```\r\n\r\nhttps://github.com/tensorflow/tensorflow/blob/421fba8888bb8f8724bc2e35ca2fdcde16e1bfe5/tensorflow/tools/pip_package/setup.py#L90\r\n\r\nis `'typing-extensions>=3.7.4,<4;python_version<\"3.9\"'` not enough?\r\n\r\nThe same goes for seldon-core==1.11.* and package click and absl-py\r\n\r\n```\r\nERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\r\nkfp 1.8.2 requires absl-py<=0.11,>=0.9, but you have absl-py 0.13.0 which is incompatible.\r\nkfp 1.8.2 requires click<8,>=7.1.1, but you have click 8.0.1 which is incompatible.\r\nkfp 1.8.2 requires typing-extensions<4,>=3.10.0.2, but you have typing-extensions 3.7.4.3 which is incompatible.\r\n```\n", "before_files": [{"content": "# Copyright 2018 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\n\nfrom setuptools import setup\n\nNAME = 'kfp'\n#VERSION = .... 
Change the version in kfp/__init__.py\n\n# NOTICE, after any updates to the following, ./requirements.in should be updated\n# accordingly.\nREQUIRES = [\n 'absl-py>=0.9,<=0.11',\n 'PyYAML>=5.3,<6',\n # `Blob.from_string` was introduced in google-cloud-storage 1.20.0\n # https://github.com/googleapis/python-storage/blob/master/CHANGELOG.md#1200\n 'google-cloud-storage>=1.20.0,<2',\n 'kubernetes>=8.0.0,<19',\n # google-api-python-client v2 doesn't work for private dicovery by default:\n # https://github.com/googleapis/google-api-python-client/issues/1225#issuecomment-791058235\n 'google-api-python-client>=1.7.8,<2',\n 'google-auth>=1.6.1,<2',\n 'requests-toolbelt>=0.8.0,<1',\n 'cloudpickle>=1.3.0,<2',\n # Update the upper version whenever a new major version of the\n # kfp-server-api package is released.\n # Update the lower version when kfp sdk depends on new apis/fields in\n # kfp-server-api.\n # Note, please also update ./requirements.in\n 'kfp-server-api>=1.1.2,<2.0.0',\n 'jsonschema>=3.0.1,<4',\n 'tabulate>=0.8.6,<1',\n 'click>=7.1.1,<8',\n 'Deprecated>=1.2.7,<2',\n 'strip-hints>=0.1.8,<1',\n 'docstring-parser>=0.7.3,<1',\n 'kfp-pipeline-spec>=0.1.10,<0.2.0',\n 'fire>=0.3.1,<1',\n 'protobuf>=3.13.0,<4',\n 'uritemplate>=3.0.1,<4',\n # Standard library backports\n 'dataclasses;python_version<\"3.7\"',\n 'typing-extensions>=3.10.0.2,<4;python_version<\"3.9\"',\n 'pydantic>=1.8.2,<2',\n]\n\nTESTS_REQUIRE = [\n 'frozendict',\n]\n\n\ndef find_version(*file_path_parts):\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, *file_path_parts), 'r') as fp:\n version_file_text = fp.read()\n\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file_text,\n re.M,\n )\n if version_match:\n return version_match.group(1)\n\n raise RuntimeError('Unable to find version string.')\n\n\nsetup(\n name=NAME,\n version=find_version('kfp', '__init__.py'),\n description='KubeFlow Pipelines SDK',\n author='The Kubeflow Authors',\n url=\"https://github.com/kubeflow/pipelines\",\n project_urls={\n \"Documentation\": \"https://kubeflow-pipelines.readthedocs.io/en/stable/\",\n \"Bug Tracker\": \"https://github.com/kubeflow/pipelines/issues\",\n \"Source\": \"https://github.com/kubeflow/pipelines/tree/master/sdk\",\n \"Changelog\": \"https://github.com/kubeflow/pipelines/blob/master/sdk/RELEASE.md\",\n },\n install_requires=REQUIRES,\n tests_require=TESTS_REQUIRE,\n packages=[\n 'kfp',\n 'kfp.auth',\n 'kfp.cli',\n 'kfp.cli.diagnose_me',\n 'kfp.compiler',\n 'kfp.components',\n 'kfp.components.structures',\n 'kfp.containers',\n 'kfp.dsl',\n 'kfp.dsl.extensions',\n 'kfp.notebook',\n 'kfp.v2',\n 'kfp.v2.compiler',\n 'kfp.v2.components',\n 'kfp.v2.components.types',\n 'kfp.v2.components.experimental',\n 'kfp.v2.dsl',\n 'kfp.v2.google.client',\n 'kfp.v2.google.experimental',\n ],\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.6.1',\n 
include_package_data=True,\n entry_points={\n 'console_scripts': [\n 'dsl-compile = kfp.compiler.main:main',\n 'dsl-compile-v2 = kfp.v2.compiler.main:main',\n 'kfp=kfp.__main__:main'\n ]\n })\n", "path": "sdk/python/setup.py"}]} | 2,652 | 184 |
gh_patches_debug_18444 | rasdani/github-patches | git_diff | xonsh__xonsh-2006 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PTK OSError Bad file descriptor
When I run a command like `cat foo.json | py -m json.tool`, xonsh got crash with following errors after the command pipe finish (with output correct). I'm not sure it's a ptk issue or xonsh one. Either way I think we should add some `try-catch` for not crashing the shell at least and for stability.
```
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/xonsh/ptk/shortcuts.py", line 108, in prompt
document = cli.run(reset_current_buffer=False)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/prompt_toolkit/interface.py", line 394, in run
self.eventloop.run(self.input, self.create_eventloop_callbacks())
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/prompt_toolkit/eventloop/posix.py", line 93, in run
self.add_reader(self._schedule_pipe[0], None)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/prompt_toolkit/eventloop/posix.py", line 280, in add_reader
self.selector.register(fd)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/prompt_toolkit/eventloop/select.py", line 75, in register
sel.register(fd)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/prompt_toolkit/eventloop/select.py", line 130, in register
self._sel.register(fd, selectors.EVENT_READ, None)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/selectors.py", line 536, in register
self._kqueue.control([kev], 0, 0)
OSError: [Errno 9] Bad file descriptor
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/3.5/bin/xonsh", line 3, in <module>
main()
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/xonsh/main.py", line 276, in main
shell.shell.cmdloop()
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/xonsh/ptk/shell.py", line 132, in cmdloop
line = self.singleline(auto_suggest=auto_suggest)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/xonsh/ptk/shell.py", line 101, in singleline
line = self.prompter.prompt(**prompt_args)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/xonsh/ptk/shortcuts.py", line 113, in prompt
eventloop.close()
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/prompt_toolkit/eventloop/posix.py", line 270, in close
os.close(schedule_pipe[0])
OSError: [Errno 9] Bad file descriptor
```
</issue>
<code>
[start of xonsh/ptk/shortcuts.py]
1 """A prompt-toolkit inspired shortcut collection."""
2 import builtins
3 import textwrap
4
5 from prompt_toolkit.interface import CommandLineInterface
6 from prompt_toolkit.enums import EditingMode
7 from prompt_toolkit.utils import DummyContext
8 from prompt_toolkit.shortcuts import (create_prompt_application,
9 create_eventloop, create_asyncio_eventloop, create_output)
10
11 from xonsh.platform import ptk_version_info
12
13
14 class Prompter(object):
15
16 def __init__(self, cli=None, *args, **kwargs):
17 """Implements a prompt that statefully holds a command-line
18 interface. When used as a context manager, it will return itself
19 on entry and reset itself on exit.
20
21 Parameters
22 ----------
23 cli : CommandLineInterface or None, optional
24 If this is not a CommandLineInterface object, such an object
25 will be created when the prompt() method is called.
26 """
27 self.cli = cli
28 self.major_minor = ptk_version_info()[:2]
29
30 def __enter__(self):
31 self.reset()
32 return self
33
34 def __exit__(self, exc_type, exc_value, traceback):
35 pass
36
37 def prompt(self, message='', **kwargs):
38 """Get input from the user and return it.
39
40 This is a wrapper around a lot of prompt_toolkit functionality and
41 can be a replacement for raw_input. (or GNU readline.) If you want
42 to keep your history across several calls, create one
43 `~prompt_toolkit.history.History instance and pass it every
44 time. This function accepts many keyword arguments. Except for the
45 following. they are a proxy to the arguments of
46 create_prompt_application().
47
48 Parameters
49 ----------
50 patch_stdout : file-like, optional
51 Replace ``sys.stdout`` by a proxy that ensures that print
52 statements from other threads won't destroy the prompt. (They
53 will be printed above the prompt instead.)
54 return_asyncio_coroutine : bool, optional
55 When True, return a asyncio coroutine. (Python >3.3)
56
57 Notes
58 -----
59 This method was forked from the mainline prompt-toolkit repo.
60 Copyright (c) 2014, Jonathan Slenders, All rights reserved.
61 """
62 patch_stdout = kwargs.pop('patch_stdout', False)
63 return_asyncio_coroutine = kwargs.pop('return_asyncio_coroutine', False)
64 if return_asyncio_coroutine:
65 eventloop = create_asyncio_eventloop()
66 else:
67 eventloop = kwargs.pop('eventloop', None) or create_eventloop()
68
69 # Create CommandLineInterface.
70 if self.cli is None:
71 if builtins.__xonsh_env__.get('VI_MODE'):
72 editing_mode = EditingMode.VI
73 else:
74 editing_mode = EditingMode.EMACS
75 kwargs['editing_mode'] = editing_mode
76 cli = CommandLineInterface(
77 application=create_prompt_application(message, **kwargs),
78 eventloop=eventloop,
79 output=create_output())
80 self.cli = cli
81 else:
82 cli = self.cli
83
84 # Replace stdout.
85 patch_context = cli.patch_stdout_context() if patch_stdout else DummyContext()
86
87 # Read input and return it.
88 if return_asyncio_coroutine:
89 # Create an asyncio coroutine and call it.
90 exec_context = {'patch_context': patch_context, 'cli': cli}
91 exec(textwrap.dedent('''
92 import asyncio
93 @asyncio.coroutine
94 def prompt_coro():
95 with patch_context:
96 document = yield from cli.run_async(reset_current_buffer=False)
97 if document:
98 return document.text
99 '''), exec_context)
100 return exec_context['prompt_coro']()
101 else:
102 # Note: We pass `reset_current_buffer=False`, because that way
103 # it's easy to give DEFAULT_BUFFER a default value, without it
104 # getting erased. We don't have to reset anyway, because this is
105 # the first and only time that this CommandLineInterface will run.
106 try:
107 with patch_context:
108 document = cli.run(reset_current_buffer=False)
109
110 if document:
111 return document.text
112 finally:
113 eventloop.close()
114
115 def reset(self):
116 """Resets the prompt and cli to a pristine state on this object."""
117 self.cli = None
118
[end of xonsh/ptk/shortcuts.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/xonsh/ptk/shortcuts.py b/xonsh/ptk/shortcuts.py
--- a/xonsh/ptk/shortcuts.py
+++ b/xonsh/ptk/shortcuts.py
@@ -9,6 +9,7 @@
create_eventloop, create_asyncio_eventloop, create_output)
from xonsh.platform import ptk_version_info
+import xonsh.tools as xt
class Prompter(object):
@@ -106,9 +107,13 @@
try:
with patch_context:
document = cli.run(reset_current_buffer=False)
-
if document:
return document.text
+ except Exception:
+ xt.print_exception()
+ # return something to prevent xonsh crash when any
+ # exceptions raise
+ return ''
finally:
eventloop.close()
| {"golden_diff": "diff --git a/xonsh/ptk/shortcuts.py b/xonsh/ptk/shortcuts.py\n--- a/xonsh/ptk/shortcuts.py\n+++ b/xonsh/ptk/shortcuts.py\n@@ -9,6 +9,7 @@\n create_eventloop, create_asyncio_eventloop, create_output)\n \n from xonsh.platform import ptk_version_info\n+import xonsh.tools as xt\n \n \n class Prompter(object):\n@@ -106,9 +107,13 @@\n try:\n with patch_context:\n document = cli.run(reset_current_buffer=False)\n-\n if document:\n return document.text\n+ except Exception:\n+ xt.print_exception()\n+ # return something to prevent xonsh crash when any\n+ # exceptions raise\n+ return ''\n finally:\n eventloop.close()\n", "issue": "PTK OSError Bad file descriptor\nWhen I run a command like `cat foo.json | py -m json.tool`, xonsh got crash with following errors after the command pipe finish (with output correct). I'm not sure it's a ptk issue or xonsh one. Either way I think we should add some `try-catch` for not crashing the shell at least and for stability.\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/xonsh/ptk/shortcuts.py\", line 108, in prompt\r\n document = cli.run(reset_current_buffer=False)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/prompt_toolkit/interface.py\", line 394, in run\r\n self.eventloop.run(self.input, self.create_eventloop_callbacks())\r\n File \"/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/prompt_toolkit/eventloop/posix.py\", line 93, in run\r\n self.add_reader(self._schedule_pipe[0], None)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/prompt_toolkit/eventloop/posix.py\", line 280, in add_reader\r\n self.selector.register(fd)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/prompt_toolkit/eventloop/select.py\", line 75, in register\r\n sel.register(fd)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/prompt_toolkit/eventloop/select.py\", line 130, in register\r\n self._sel.register(fd, selectors.EVENT_READ, None)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/selectors.py\", line 536, in register\r\n self._kqueue.control([kev], 0, 0)\r\nOSError: [Errno 9] Bad file descriptor\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/Library/Frameworks/Python.framework/Versions/3.5/bin/xonsh\", line 3, in <module>\r\n main()\r\n File \"/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/xonsh/main.py\", line 276, in main\r\n shell.shell.cmdloop()\r\n File \"/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/xonsh/ptk/shell.py\", line 132, in cmdloop\r\n line = self.singleline(auto_suggest=auto_suggest)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/xonsh/ptk/shell.py\", line 101, in singleline\r\n line = self.prompter.prompt(**prompt_args)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/xonsh/ptk/shortcuts.py\", line 113, in prompt\r\n eventloop.close()\r\n File \"/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/prompt_toolkit/eventloop/posix.py\", line 270, in close\r\n os.close(schedule_pipe[0])\r\nOSError: [Errno 9] Bad file descriptor\r\n```\n", "before_files": [{"content": "\"\"\"A prompt-toolkit inspired 
shortcut collection.\"\"\"\nimport builtins\nimport textwrap\n\nfrom prompt_toolkit.interface import CommandLineInterface\nfrom prompt_toolkit.enums import EditingMode\nfrom prompt_toolkit.utils import DummyContext\nfrom prompt_toolkit.shortcuts import (create_prompt_application,\n create_eventloop, create_asyncio_eventloop, create_output)\n\nfrom xonsh.platform import ptk_version_info\n\n\nclass Prompter(object):\n\n def __init__(self, cli=None, *args, **kwargs):\n \"\"\"Implements a prompt that statefully holds a command-line\n interface. When used as a context manager, it will return itself\n on entry and reset itself on exit.\n\n Parameters\n ----------\n cli : CommandLineInterface or None, optional\n If this is not a CommandLineInterface object, such an object\n will be created when the prompt() method is called.\n \"\"\"\n self.cli = cli\n self.major_minor = ptk_version_info()[:2]\n\n def __enter__(self):\n self.reset()\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n pass\n\n def prompt(self, message='', **kwargs):\n \"\"\"Get input from the user and return it.\n\n This is a wrapper around a lot of prompt_toolkit functionality and\n can be a replacement for raw_input. (or GNU readline.) If you want\n to keep your history across several calls, create one\n `~prompt_toolkit.history.History instance and pass it every\n time. This function accepts many keyword arguments. Except for the\n following. they are a proxy to the arguments of\n create_prompt_application().\n\n Parameters\n ----------\n patch_stdout : file-like, optional\n Replace ``sys.stdout`` by a proxy that ensures that print\n statements from other threads won't destroy the prompt. (They\n will be printed above the prompt instead.)\n return_asyncio_coroutine : bool, optional\n When True, return a asyncio coroutine. (Python >3.3)\n\n Notes\n -----\n This method was forked from the mainline prompt-toolkit repo.\n Copyright (c) 2014, Jonathan Slenders, All rights reserved.\n \"\"\"\n patch_stdout = kwargs.pop('patch_stdout', False)\n return_asyncio_coroutine = kwargs.pop('return_asyncio_coroutine', False)\n if return_asyncio_coroutine:\n eventloop = create_asyncio_eventloop()\n else:\n eventloop = kwargs.pop('eventloop', None) or create_eventloop()\n\n # Create CommandLineInterface.\n if self.cli is None:\n if builtins.__xonsh_env__.get('VI_MODE'):\n editing_mode = EditingMode.VI\n else:\n editing_mode = EditingMode.EMACS\n kwargs['editing_mode'] = editing_mode\n cli = CommandLineInterface(\n application=create_prompt_application(message, **kwargs),\n eventloop=eventloop,\n output=create_output())\n self.cli = cli\n else:\n cli = self.cli\n\n # Replace stdout.\n patch_context = cli.patch_stdout_context() if patch_stdout else DummyContext()\n\n # Read input and return it.\n if return_asyncio_coroutine:\n # Create an asyncio coroutine and call it.\n exec_context = {'patch_context': patch_context, 'cli': cli}\n exec(textwrap.dedent('''\n import asyncio\n @asyncio.coroutine\n def prompt_coro():\n with patch_context:\n document = yield from cli.run_async(reset_current_buffer=False)\n if document:\n return document.text\n '''), exec_context)\n return exec_context['prompt_coro']()\n else:\n # Note: We pass `reset_current_buffer=False`, because that way\n # it's easy to give DEFAULT_BUFFER a default value, without it\n # getting erased. 
We don't have to reset anyway, because this is\n # the first and only time that this CommandLineInterface will run.\n try:\n with patch_context:\n document = cli.run(reset_current_buffer=False)\n\n if document:\n return document.text\n finally:\n eventloop.close()\n\n def reset(self):\n \"\"\"Resets the prompt and cli to a pristine state on this object.\"\"\"\n self.cli = None\n", "path": "xonsh/ptk/shortcuts.py"}]} | 2,475 | 190 |
gh_patches_debug_60855 | rasdani/github-patches | git_diff | airctic__icevision-500 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add tutorial with hard negative samples
## 📓 Documentation Update
"how to use an image as background annotation" is a common question. We can provide a tutorial showing how to do that
### Racoon and dogs
If you train a model on the racoon dataset and show the model a picture of a dog it will classify it as a racoon. We can add images of dogs to the dataset (without any annotations) and show how the difference of model performance in both scenarios.
</issue>
<code>
[start of icevision/models/base_show_results.py]
1 __all__ = ["base_show_results"]
2
3 from icevision.imports import *
4 from icevision.utils import *
5 from icevision.core import *
6 from icevision.visualize import *
7 from icevision.data import *
8
9
10 def base_show_results(
11 predict_fn: callable,
12 build_infer_batch_fn: callable,
13 model: nn.Module,
14 dataset: Dataset,
15 class_map: Optional[ClassMap] = None,
16 num_samples: int = 6,
17 ncols: int = 3,
18 denormalize_fn: Optional[callable] = denormalize_imagenet,
19 show: bool = True,
20 ) -> None:
21 samples = [dataset[i] for i in range(num_samples)]
22 batch, samples = build_infer_batch_fn(samples)
23 preds = predict_fn(model, batch)
24
25 imgs = [sample["img"] for sample in samples]
26 show_preds(
27 imgs,
28 preds,
29 class_map=class_map,
30 denormalize_fn=denormalize_fn,
31 ncols=ncols,
32 show=show,
33 )
34
[end of icevision/models/base_show_results.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/icevision/models/base_show_results.py b/icevision/models/base_show_results.py
--- a/icevision/models/base_show_results.py
+++ b/icevision/models/base_show_results.py
@@ -18,7 +18,7 @@
denormalize_fn: Optional[callable] = denormalize_imagenet,
show: bool = True,
) -> None:
- samples = [dataset[i] for i in range(num_samples)]
+ samples = random.choices(dataset, k=num_samples)
batch, samples = build_infer_batch_fn(samples)
preds = predict_fn(model, batch)
| {"golden_diff": "diff --git a/icevision/models/base_show_results.py b/icevision/models/base_show_results.py\n--- a/icevision/models/base_show_results.py\n+++ b/icevision/models/base_show_results.py\n@@ -18,7 +18,7 @@\n denormalize_fn: Optional[callable] = denormalize_imagenet,\n show: bool = True,\n ) -> None:\n- samples = [dataset[i] for i in range(num_samples)]\n+ samples = random.choices(dataset, k=num_samples)\n batch, samples = build_infer_batch_fn(samples)\n preds = predict_fn(model, batch)\n", "issue": "Add tutorial with hard negative samples\n## \ud83d\udcd3 Documentation Update\r\n\"how to use an image as background annotation\" is a common question. We can provide a tutorial showing how to do that\r\n\r\n### Racoon and dogs\r\nIf you train a model on the racoon dataset and show the model a picture of a dog it will classify it as a racoon. We can add images of dogs to the dataset (without any annotations) and show how the difference of model performance in both scenarios.\n", "before_files": [{"content": "__all__ = [\"base_show_results\"]\n\nfrom icevision.imports import *\nfrom icevision.utils import *\nfrom icevision.core import *\nfrom icevision.visualize import *\nfrom icevision.data import *\n\n\ndef base_show_results(\n predict_fn: callable,\n build_infer_batch_fn: callable,\n model: nn.Module,\n dataset: Dataset,\n class_map: Optional[ClassMap] = None,\n num_samples: int = 6,\n ncols: int = 3,\n denormalize_fn: Optional[callable] = denormalize_imagenet,\n show: bool = True,\n) -> None:\n samples = [dataset[i] for i in range(num_samples)]\n batch, samples = build_infer_batch_fn(samples)\n preds = predict_fn(model, batch)\n\n imgs = [sample[\"img\"] for sample in samples]\n show_preds(\n imgs,\n preds,\n class_map=class_map,\n denormalize_fn=denormalize_fn,\n ncols=ncols,\n show=show,\n )\n", "path": "icevision/models/base_show_results.py"}]} | 921 | 133 |
gh_patches_debug_15676 | rasdani/github-patches | git_diff | WeblateOrg__weblate-7984 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Translation Memory Maintenance - Delete Entries
It would be good to be able to maintain the TM in one way or the other.
Perhaps
- [ ] Search & Replace in TM
- [ ] Search & Delete entries in TM
Or perhpas, as a "simple" (?) starting point, giving the translator the option to delete single entries from the TM when they see the result list in **Automatic Suggestions**. Like perhaps:
- [ ] Delete single entry in **Automatic Suggestions** view:

</issue>
<code>
[start of weblate/memory/machine.py]
1 #
2 # Copyright © 2012–2022 Michal Čihař <[email protected]>
3 #
4 # This file is part of Weblate <https://weblate.org/>
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation, either version 3 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program. If not, see <https://www.gnu.org/licenses/>.
18 #
19
20 from weblate.machinery.base import MachineTranslation, get_machinery_language
21 from weblate.memory.models import Memory
22
23
24 class WeblateMemory(MachineTranslation):
25 """Translation service using strings already translated in Weblate."""
26
27 name = "Weblate Translation Memory"
28 rank_boost = 2
29 cache_translations = False
30 same_languages = True
31 accounting_key = "internal"
32 do_cleanup = False
33
34 def convert_language(self, language):
35 """No conversion of language object."""
36 return get_machinery_language(language)
37
38 def is_supported(self, source, language):
39 """Any language is supported."""
40 return True
41
42 def is_rate_limited(self):
43 """This service has no rate limiting."""
44 return False
45
46 def download_translations(
47 self,
48 source,
49 language,
50 text: str,
51 unit,
52 user,
53 search: bool,
54 threshold: int = 75,
55 ):
56 """Download list of possible translations from a service."""
57 for result in Memory.objects.lookup(
58 source,
59 language,
60 text,
61 user,
62 unit.translation.component.project,
63 unit.translation.component.project.use_shared_tm,
64 ).iterator():
65 quality = self.comparer.similarity(text, result.source)
66 if quality < 10 or (quality < threshold and not search):
67 continue
68 yield {
69 "text": result.target,
70 "quality": quality,
71 "service": self.name,
72 "origin": result.get_origin_display(),
73 "source": result.source,
74 "show_quality": True,
75 }
76
[end of weblate/memory/machine.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/weblate/memory/machine.py b/weblate/memory/machine.py
--- a/weblate/memory/machine.py
+++ b/weblate/memory/machine.py
@@ -17,6 +17,8 @@
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
+from django.urls import reverse
+
from weblate.machinery.base import MachineTranslation, get_machinery_language
from weblate.memory.models import Memory
@@ -72,4 +74,7 @@
"origin": result.get_origin_display(),
"source": result.source,
"show_quality": True,
+ "delete_url": reverse("api:memory-detail", kwargs={"pk": result.id})
+ if user is not None and user.has_perm("memory.delete", result)
+ else None,
}
| {"golden_diff": "diff --git a/weblate/memory/machine.py b/weblate/memory/machine.py\n--- a/weblate/memory/machine.py\n+++ b/weblate/memory/machine.py\n@@ -17,6 +17,8 @@\n # along with this program. If not, see <https://www.gnu.org/licenses/>.\n #\n \n+from django.urls import reverse\n+\n from weblate.machinery.base import MachineTranslation, get_machinery_language\n from weblate.memory.models import Memory\n \n@@ -72,4 +74,7 @@\n \"origin\": result.get_origin_display(),\n \"source\": result.source,\n \"show_quality\": True,\n+ \"delete_url\": reverse(\"api:memory-detail\", kwargs={\"pk\": result.id})\n+ if user is not None and user.has_perm(\"memory.delete\", result)\n+ else None,\n }\n", "issue": "Translation Memory Maintenance - Delete Entries\nIt would be good to be able to maintain the TM in one way or the other.\r\n\r\nPerhaps \r\n- [ ] Search & Replace in TM\r\n- [ ] Search & Delete entries in TM\r\n\r\nOr perhpas, as a \"simple\" (?) starting point, giving the translator the option to delete single entries from the TM when they see the result list in **Automatic Suggestions**. Like perhaps:\r\n\r\n- [ ] Delete single entry in **Automatic Suggestions** view:\r\n\r\n\r\n\n", "before_files": [{"content": "#\n# Copyright \u00a9 2012\u20132022 Michal \u010ciha\u0159 <[email protected]>\n#\n# This file is part of Weblate <https://weblate.org/>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n#\n\nfrom weblate.machinery.base import MachineTranslation, get_machinery_language\nfrom weblate.memory.models import Memory\n\n\nclass WeblateMemory(MachineTranslation):\n \"\"\"Translation service using strings already translated in Weblate.\"\"\"\n\n name = \"Weblate Translation Memory\"\n rank_boost = 2\n cache_translations = False\n same_languages = True\n accounting_key = \"internal\"\n do_cleanup = False\n\n def convert_language(self, language):\n \"\"\"No conversion of language object.\"\"\"\n return get_machinery_language(language)\n\n def is_supported(self, source, language):\n \"\"\"Any language is supported.\"\"\"\n return True\n\n def is_rate_limited(self):\n \"\"\"This service has no rate limiting.\"\"\"\n return False\n\n def download_translations(\n self,\n source,\n language,\n text: str,\n unit,\n user,\n search: bool,\n threshold: int = 75,\n ):\n \"\"\"Download list of possible translations from a service.\"\"\"\n for result in Memory.objects.lookup(\n source,\n language,\n text,\n user,\n unit.translation.component.project,\n unit.translation.component.project.use_shared_tm,\n ).iterator():\n quality = self.comparer.similarity(text, result.source)\n if quality < 10 or (quality < threshold and not search):\n continue\n yield {\n \"text\": result.target,\n \"quality\": quality,\n \"service\": self.name,\n \"origin\": result.get_origin_display(),\n \"source\": result.source,\n \"show_quality\": True,\n }\n", "path": "weblate/memory/machine.py"}]} | 1,372 | 185 |
gh_patches_debug_37589 | rasdani/github-patches | git_diff | ESMCI__cime-2777 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PR #2759 has caused DAE.f19_f19.A to fail during phase RUN
The errors can be reproduced on anlworkstation by running the following cime_developer test:
`./create_test DAE.f19_f19.A --machine=anlworkstation --test-root=****`
Errors were:
```
Exception during run:
ERROR: ERROR: Expected 1 Initial run message(s), 0 found in DAE.f19_f19.A.anlworkstation_gnu.xxxx/run/case2run/da.log.xxxx.gz
Traceback (most recent call last):
File "scripts/lib/CIME/SystemTests/system_tests_common.py", line 145, in run
self.run_phase()
File "scripts/lib/CIME/SystemTests/dae.py", line 155, in run_phase
"ERROR: Expected {} Initial run message(s), {} found in {}".format(expected_init, found_init, fname))
File "scripts/lib/CIME/utils.py", line 130, in expect
raise exc_type(msg)
SystemExit: ERROR: ERROR: Expected 1 Initial run message(s), 0 found in DAE.f19_f19.A.anlworkstation_gnu.xxxx/run/case2run/da.log.xxxx.gz
```
Do we need some settings on anlworkstation to make it work?
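
One way to see what the test is actually checking, purely as a debugging sketch, is to dump the per-cycle `da.log.*.gz` files that `run_phase` parses (the run directory below is taken from the error message and is only a placeholder):

```python
# Debugging sketch (not part of the test suite): print the contents of the
# gzipped DA log files so the Initial-run / resume-signal lines can be
# inspected by hand.
import glob
import gzip
import os

rundir = "DAE.f19_f19.A.anlworkstation_gnu.xxxx/run/case2run"  # placeholder path from the error message
for fname in sorted(glob.glob(os.path.join(rundir, "da.log.*"))):
    print("==", fname, "==")
    with gzip.open(fname, "rt") as dfile:
        print(dfile.read())
```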
</issue>
<code>
[start of scripts/lib/CIME/SystemTests/dae.py]
1 """
2 Implementation of the CIME data assimilation test:
3 Compares standard run with run broken into two data assimilation cycles.
4 Runs a simple DA script on each cycle which performs checks but does not
5 change any model state (restart files). Compares answers of two runs.
6
7 """
8
9 import os.path
10 import logging
11 import glob
12 import gzip
13
14 import CIME.XML.standard_module_setup as sms
15 from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo
16 from CIME.utils import expect
17
18 ###############################################################################
19 class DAE(SystemTestsCompareTwo):
20 ###############################################################################
21 """
22 Implementation of the CIME data assimilation test:
23 Compares standard run with a run broken into two data assimilation cycles.
24 Runs a simple DA script on each cycle which performs checks but does not
25 change any model state (restart files). Compares answers of two runs.
26 Refers to a faux data assimilation script in the
27 cime/scripts/data_assimilation directory
28 """
29
30 ###########################################################################
31 def __init__(self, case):
32 ###########################################################################
33 SystemTestsCompareTwo.__init__(self, case,
34 separate_builds=False,
35 run_two_suffix='da',
36 run_one_description='no data assimilation',
37 run_two_description='data assimilation')
38
39 ###########################################################################
40 def _case_one_setup(self):
41 ###########################################################################
42 # Even though there may be test mods turning on data assimilation,
43 # case1 is the control so turn it off
44 self._case.set_value("DATA_ASSIMILATION_SCRIPT", "")
45 self._case.set_value("DATA_ASSIMILATION_CYCLES", 1)
46
47 ###########################################################################
48 def _case_two_setup(self):
49 ###########################################################################
50 # Allow testmods to set an assimilation script
51 if len(self._case.get_value("DATA_ASSIMILATION_SCRIPT")) == 0:
52 # We need to find the scripts/data_assimilation directory
53 # LIB_DIR should be our parent dir
54 da_dir = os.path.join(os.path.dirname(sms.LIB_DIR), "data_assimilation")
55 expect(os.path.isdir(da_dir), "ERROR: da_dir, '{}', does not exist".format(da_dir))
56 da_file = os.path.join(da_dir, "da_no_data_mod.sh")
57 expect(os.path.isfile(da_file), "ERROR: da_file, '{}', does not exist".format(da_file))
58 # Set up two data assimilation cycles each half of the full run
59 self._case.set_value("DATA_ASSIMILATION_SCRIPT", da_file)
60
61 # We need at least 2 DA cycles
62 da_cycles = self._case.get_value("DATA_ASSIMILATION_CYCLES")
63 if da_cycles < 2:
64 da_cycles = 2
65 self._case.set_value("DATA_ASSIMILATION_CYCLES", da_cycles)
66 stopn = self._case.get_value("STOP_N")
67 expect((stopn % da_cycles) == 0, "ERROR: DAE test with {0} cycles requires that STOP_N be divisible by {0}".format(da_cycles))
68 stopn = int(stopn / da_cycles)
69 self._case.set_value("STOP_N", stopn)
70
71 self._case.flush()
72
73 ###########################################################################
74 def run_phase(self): # pylint: disable=arguments-differ
75 ###########################################################################
76 # Clean up any da.log files in case this is a re-run.
77 self._activate_case2()
78 case_root = self._get_caseroot2()
79 rundir2 = self._case.get_value("RUNDIR")
80 da_files = glob.glob(os.path.join(rundir2, 'da.log.*'))
81 for file_ in da_files:
82 os.remove(file_)
83 # End for
84
85 # CONTINUE_RUN ends up TRUE, set it back in case this is a re-run.
86 self._case.set_value("CONTINUE_RUN", False)
87 # Start normal run here
88 self._activate_case1()
89 SystemTestsCompareTwo.run_phase(self)
90
91 # Do some checks on the data assimilation 'output' from case2
92 self._activate_case2()
93 da_files = glob.glob(os.path.join(rundir2, 'da.log.*'))
94 if da_files is None:
95 logger = logging.getLogger(__name__)
96 path = os.path.join(case_root, 'da.log.*')
97 logger.warning("No DA files in {}".format(path))
98
99 da_cycles = self._case.get_value("DATA_ASSIMILATION_CYCLES")
100 expect((da_files is not None) and (len(da_files) == da_cycles),
101 "ERROR: There were {:d} DA cycles in run but {:d} DA files were found".format(da_cycles, len(da_files) if da_files is not None else 0))
102 da_files.sort()
103 cycle_num = 0
104 compset = self._case.get_value("COMPSET")
105 # Special case for DWAV so we can make sure other variables are set
106 is_dwav = '_DWAV' in compset
107 for fname in da_files:
108 found_caseroot = False
109 found_cycle = False
110 found_signal = 0
111 found_init = 0
112 if is_dwav:
113 expected_init = self._case.get_value("NINST_WAV")
114 else:
115 # Expect a signal from every instance of every DA component
116 expected_init = 0
117 for comp in self._case.get_values("COMP_CLASSES"):
118 if self._case.get_value("DATA_ASSIMILATION_{}".format(comp)):
119 expected_init = expected_init + self._case.get_value("NINST_{}".format(comp))
120
121 # Adjust expected initial run and post-DA numbers
122 if cycle_num == 0:
123 expected_signal = 0
124 else:
125 expected_signal = expected_init
126 expected_init = 0
127
128 with gzip.open(fname, "r") as dfile:
129 for bline in dfile:
130 line = bline.decode("utf-8")
131 expect(not 'ERROR' in line, "ERROR, error line {} found in {}".format(line, fname))
132 if 'caseroot' in line[0:8]:
133 found_caseroot = True
134 elif 'cycle' in line[0:5]:
135 found_cycle = True
136 expect(int(line[7:]) == cycle_num,
137 "ERROR: Wrong cycle ({:d}) found in {} (expected {:d})".format(int(line[7:]), fname, cycle_num))
138 elif 'resume signal' in line:
139 found_signal = found_signal + 1
140 expect('Post-DA resume signal found' in line[0:27],
141 "ERROR: bad post-DA message found in {}".format(fname))
142 elif 'Initial run' in line:
143 found_init = found_init + 1
144 expect('Initial run signal found' in line[0:24],
145 "ERROR: bad Initial run message found in {}".format(fname))
146 else:
147 expect(False, "ERROR: Unrecognized line ('{}') found in {}".format(line, fname))
148
149 # End of for loop
150 expect(found_caseroot, "ERROR: No caseroot found in {}".format(fname))
151 expect(found_cycle, "ERROR: No cycle found in {}".format(fname))
152 expect(found_signal == expected_signal,
153 "ERROR: Expected {} post-DA resume signal message(s), {} found in {}".format(expected_signal, found_signal, fname))
154 expect(found_init == expected_init,
155 "ERROR: Expected {} Initial run message(s), {} found in {}".format(expected_init, found_init, fname))
156 # End of with
157 cycle_num = cycle_num + 1
158
[end of scripts/lib/CIME/SystemTests/dae.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/lib/CIME/SystemTests/dae.py b/scripts/lib/CIME/SystemTests/dae.py
--- a/scripts/lib/CIME/SystemTests/dae.py
+++ b/scripts/lib/CIME/SystemTests/dae.py
@@ -84,6 +84,12 @@
# CONTINUE_RUN ends up TRUE, set it back in case this is a re-run.
self._case.set_value("CONTINUE_RUN", False)
+ # Turn off post DA in case this is a re-run
+ for comp in self._case.get_values("COMP_CLASSES"):
+ if comp == "ESP":
+ continue
+ else:
+ self._case.set_value("DATA_ASSIMILATION_{}".format(comp), False)
# Start normal run here
self._activate_case1()
SystemTestsCompareTwo.run_phase(self)
@@ -115,7 +121,9 @@
# Expect a signal from every instance of every DA component
expected_init = 0
for comp in self._case.get_values("COMP_CLASSES"):
- if self._case.get_value("DATA_ASSIMILATION_{}".format(comp)):
+ if comp == "ESP":
+ continue
+ elif self._case.get_value("DATA_ASSIMILATION_{}".format(comp)):
expected_init = expected_init + self._case.get_value("NINST_{}".format(comp))
# Adjust expected initial run and post-DA numbers
@@ -146,12 +154,13 @@
else:
expect(False, "ERROR: Unrecognized line ('{}') found in {}".format(line, fname))
- # End of for loop
+ # End for
expect(found_caseroot, "ERROR: No caseroot found in {}".format(fname))
expect(found_cycle, "ERROR: No cycle found in {}".format(fname))
expect(found_signal == expected_signal,
"ERROR: Expected {} post-DA resume signal message(s), {} found in {}".format(expected_signal, found_signal, fname))
expect(found_init == expected_init,
"ERROR: Expected {} Initial run message(s), {} found in {}".format(expected_init, found_init, fname))
- # End of with
+ # End with
cycle_num = cycle_num + 1
+ # End for
| {"golden_diff": "diff --git a/scripts/lib/CIME/SystemTests/dae.py b/scripts/lib/CIME/SystemTests/dae.py\n--- a/scripts/lib/CIME/SystemTests/dae.py\n+++ b/scripts/lib/CIME/SystemTests/dae.py\n@@ -84,6 +84,12 @@\n \n # CONTINUE_RUN ends up TRUE, set it back in case this is a re-run.\n self._case.set_value(\"CONTINUE_RUN\", False)\n+ # Turn off post DA in case this is a re-run\n+ for comp in self._case.get_values(\"COMP_CLASSES\"):\n+ if comp == \"ESP\":\n+ continue\n+ else:\n+ self._case.set_value(\"DATA_ASSIMILATION_{}\".format(comp), False)\n # Start normal run here\n self._activate_case1()\n SystemTestsCompareTwo.run_phase(self)\n@@ -115,7 +121,9 @@\n # Expect a signal from every instance of every DA component\n expected_init = 0\n for comp in self._case.get_values(\"COMP_CLASSES\"):\n- if self._case.get_value(\"DATA_ASSIMILATION_{}\".format(comp)):\n+ if comp == \"ESP\":\n+ continue\n+ elif self._case.get_value(\"DATA_ASSIMILATION_{}\".format(comp)):\n expected_init = expected_init + self._case.get_value(\"NINST_{}\".format(comp))\n \n # Adjust expected initial run and post-DA numbers\n@@ -146,12 +154,13 @@\n else:\n expect(False, \"ERROR: Unrecognized line ('{}') found in {}\".format(line, fname))\n \n- # End of for loop\n+ # End for\n expect(found_caseroot, \"ERROR: No caseroot found in {}\".format(fname))\n expect(found_cycle, \"ERROR: No cycle found in {}\".format(fname))\n expect(found_signal == expected_signal,\n \"ERROR: Expected {} post-DA resume signal message(s), {} found in {}\".format(expected_signal, found_signal, fname))\n expect(found_init == expected_init,\n \"ERROR: Expected {} Initial run message(s), {} found in {}\".format(expected_init, found_init, fname))\n- # End of with\n+ # End with\n cycle_num = cycle_num + 1\n+ # End for\n", "issue": "PR #2759 has caused DAE.f19_f19.A to fail during phase RUN\nThe errors can be reproduced on anlworkstation by running the following cime_developer test:\r\n`./create_test DAE.f19_f19.A --machine=anlworkstation --test-root=****`\r\n\r\nErrors were:\r\n```\r\nException during run:\r\nERROR: ERROR: Expected 1 Initial run message(s), 0 found in DAE.f19_f19.A.anlworkstation_gnu.xxxx/run/case2run/da.log.xxxx.gz\r\nTraceback (most recent call last):\r\n File \"scripts/lib/CIME/SystemTests/system_tests_common.py\", line 145, in run\r\n self.run_phase()\r\n File \"scripts/lib/CIME/SystemTests/dae.py\", line 155, in run_phase\r\n \"ERROR: Expected {} Initial run message(s), {} found in {}\".format(expected_init, found_init, fname))\r\n File \"scripts/lib/CIME/utils.py\", line 130, in expect\r\n raise exc_type(msg)\r\nSystemExit: ERROR: ERROR: Expected 1 Initial run message(s), 0 found in DAE.f19_f19.A.anlworkstation_gnu.xxxx/run/case2run/da.log.xxxx.gz\r\n```\r\nDo we need some settings on anlworkstation to make it work?\n", "before_files": [{"content": "\"\"\"\nImplementation of the CIME data assimilation test:\nCompares standard run with run broken into two data assimilation cycles.\nRuns a simple DA script on each cycle which performs checks but does not\nchange any model state (restart files). 
Compares answers of two runs.\n\n\"\"\"\n\nimport os.path\nimport logging\nimport glob\nimport gzip\n\nimport CIME.XML.standard_module_setup as sms\nfrom CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo\nfrom CIME.utils import expect\n\n###############################################################################\nclass DAE(SystemTestsCompareTwo):\n###############################################################################\n \"\"\"\n Implementation of the CIME data assimilation test:\n Compares standard run with a run broken into two data assimilation cycles.\n Runs a simple DA script on each cycle which performs checks but does not\n change any model state (restart files). Compares answers of two runs.\n Refers to a faux data assimilation script in the\n cime/scripts/data_assimilation directory\n \"\"\"\n\n ###########################################################################\n def __init__(self, case):\n ###########################################################################\n SystemTestsCompareTwo.__init__(self, case,\n separate_builds=False,\n run_two_suffix='da',\n run_one_description='no data assimilation',\n run_two_description='data assimilation')\n\n ###########################################################################\n def _case_one_setup(self):\n ###########################################################################\n # Even though there may be test mods turning on data assimilation,\n # case1 is the control so turn it off\n self._case.set_value(\"DATA_ASSIMILATION_SCRIPT\", \"\")\n self._case.set_value(\"DATA_ASSIMILATION_CYCLES\", 1)\n\n ###########################################################################\n def _case_two_setup(self):\n ###########################################################################\n # Allow testmods to set an assimilation script\n if len(self._case.get_value(\"DATA_ASSIMILATION_SCRIPT\")) == 0:\n # We need to find the scripts/data_assimilation directory\n # LIB_DIR should be our parent dir\n da_dir = os.path.join(os.path.dirname(sms.LIB_DIR), \"data_assimilation\")\n expect(os.path.isdir(da_dir), \"ERROR: da_dir, '{}', does not exist\".format(da_dir))\n da_file = os.path.join(da_dir, \"da_no_data_mod.sh\")\n expect(os.path.isfile(da_file), \"ERROR: da_file, '{}', does not exist\".format(da_file))\n # Set up two data assimilation cycles each half of the full run\n self._case.set_value(\"DATA_ASSIMILATION_SCRIPT\", da_file)\n\n # We need at least 2 DA cycles\n da_cycles = self._case.get_value(\"DATA_ASSIMILATION_CYCLES\")\n if da_cycles < 2:\n da_cycles = 2\n self._case.set_value(\"DATA_ASSIMILATION_CYCLES\", da_cycles)\n stopn = self._case.get_value(\"STOP_N\")\n expect((stopn % da_cycles) == 0, \"ERROR: DAE test with {0} cycles requires that STOP_N be divisible by {0}\".format(da_cycles))\n stopn = int(stopn / da_cycles)\n self._case.set_value(\"STOP_N\", stopn)\n\n self._case.flush()\n\n ###########################################################################\n def run_phase(self): # pylint: disable=arguments-differ\n ###########################################################################\n # Clean up any da.log files in case this is a re-run.\n self._activate_case2()\n case_root = self._get_caseroot2()\n rundir2 = self._case.get_value(\"RUNDIR\")\n da_files = glob.glob(os.path.join(rundir2, 'da.log.*'))\n for file_ in da_files:\n os.remove(file_)\n # End for\n\n # CONTINUE_RUN ends up TRUE, set it back in case this is a re-run.\n self._case.set_value(\"CONTINUE_RUN\", False)\n # 
Start normal run here\n self._activate_case1()\n SystemTestsCompareTwo.run_phase(self)\n\n # Do some checks on the data assimilation 'output' from case2\n self._activate_case2()\n da_files = glob.glob(os.path.join(rundir2, 'da.log.*'))\n if da_files is None:\n logger = logging.getLogger(__name__)\n path = os.path.join(case_root, 'da.log.*')\n logger.warning(\"No DA files in {}\".format(path))\n\n da_cycles = self._case.get_value(\"DATA_ASSIMILATION_CYCLES\")\n expect((da_files is not None) and (len(da_files) == da_cycles),\n \"ERROR: There were {:d} DA cycles in run but {:d} DA files were found\".format(da_cycles, len(da_files) if da_files is not None else 0))\n da_files.sort()\n cycle_num = 0\n compset = self._case.get_value(\"COMPSET\")\n # Special case for DWAV so we can make sure other variables are set\n is_dwav = '_DWAV' in compset\n for fname in da_files:\n found_caseroot = False\n found_cycle = False\n found_signal = 0\n found_init = 0\n if is_dwav:\n expected_init = self._case.get_value(\"NINST_WAV\")\n else:\n # Expect a signal from every instance of every DA component\n expected_init = 0\n for comp in self._case.get_values(\"COMP_CLASSES\"):\n if self._case.get_value(\"DATA_ASSIMILATION_{}\".format(comp)):\n expected_init = expected_init + self._case.get_value(\"NINST_{}\".format(comp))\n\n # Adjust expected initial run and post-DA numbers\n if cycle_num == 0:\n expected_signal = 0\n else:\n expected_signal = expected_init\n expected_init = 0\n\n with gzip.open(fname, \"r\") as dfile:\n for bline in dfile:\n line = bline.decode(\"utf-8\")\n expect(not 'ERROR' in line, \"ERROR, error line {} found in {}\".format(line, fname))\n if 'caseroot' in line[0:8]:\n found_caseroot = True\n elif 'cycle' in line[0:5]:\n found_cycle = True\n expect(int(line[7:]) == cycle_num,\n \"ERROR: Wrong cycle ({:d}) found in {} (expected {:d})\".format(int(line[7:]), fname, cycle_num))\n elif 'resume signal' in line:\n found_signal = found_signal + 1\n expect('Post-DA resume signal found' in line[0:27],\n \"ERROR: bad post-DA message found in {}\".format(fname))\n elif 'Initial run' in line:\n found_init = found_init + 1\n expect('Initial run signal found' in line[0:24],\n \"ERROR: bad Initial run message found in {}\".format(fname))\n else:\n expect(False, \"ERROR: Unrecognized line ('{}') found in {}\".format(line, fname))\n\n # End of for loop\n expect(found_caseroot, \"ERROR: No caseroot found in {}\".format(fname))\n expect(found_cycle, \"ERROR: No cycle found in {}\".format(fname))\n expect(found_signal == expected_signal,\n \"ERROR: Expected {} post-DA resume signal message(s), {} found in {}\".format(expected_signal, found_signal, fname))\n expect(found_init == expected_init,\n \"ERROR: Expected {} Initial run message(s), {} found in {}\".format(expected_init, found_init, fname))\n # End of with\n cycle_num = cycle_num + 1\n", "path": "scripts/lib/CIME/SystemTests/dae.py"}]} | 2,833 | 500 |
gh_patches_debug_39639 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-959 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Stuck in "Waiting for GIL"
Hi,
We found a thread that seems blocked forever:
```
gdb python 30107
```
Here's the single thread:
```
(gdb) info threads
Id Target Id Frame
* 1 Thread 0x7f1dd852e4c0 (LWP 30107) "/opt/simon/rele" 0x00007f1dd7d2e146 in do_futex_wait.constprop () from /lib64/libpthread.so.0
```
Here's the backtrace:
```
Traceback (most recent call first):
Waiting for the GIL
File "/opt/simon/releases/b038662ce7d140609b1e4883a3ea0edf77851537/venv/lib/python2.7/site-packages/ddtrace/sampler.py", line 88, in sample
with self._lock:
File "/opt/simon/releases/b038662ce7d140609b1e4883a3ea0edf77851537/venv/lib/python2.7/site-packages/ddtrace/tracer.py", line 251, in start_span
if self.priority_sampler.sample(span):
File "/opt/simon/releases/b038662ce7d140609b1e4883a3ea0edf77851537/venv/lib/python2.7/site-packages/ddtrace/tracer.py", line 376, in trace
span_type=span_type,
File "/opt/simon/releases/b038662ce7d140609b1e4883a3ea0edf77851537/venv/lib/python2.7/site-packages/ddtrace/contrib/django/cache.py", line 56, in wrapped
with tracer.trace('django.cache', span_type=TYPE, service=cache_service_name) as span:
```
I'm not sure how to troubleshoot from here. Have you seen this before, or have any suggestions on what I could look at? It's the only thread in the process, so nothing should be holding the GIL.
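
For what it's worth, one way to avoid taking a lock on this hot path at all is to rebuild the per-service mapping and publish it with a single reference swap instead of mutating it under a lock. This is only a sketch under the assumption that rate updates are rare compared to sampling calls; it reuses the `RateSampler` class from `ddtrace/sampler.py` shown further down and simplifies the key handling:

```python
# Sketch only, not the actual ddtrace implementation: readers grab a local
# reference to the dict and never block; writers publish a brand-new dict,
# relying on attribute assignment being atomic under CPython's GIL.
DEFAULT_KEY = 'service:,env:'

class LockFreeRateByServiceSampler(object):
    def __init__(self, sample_rate=1):
        # RateSampler is the class defined in ddtrace/sampler.py below.
        self._by_service_samplers = {DEFAULT_KEY: RateSampler(sample_rate)}

    def sample(self, span, key=DEFAULT_KEY):
        samplers = self._by_service_samplers  # local snapshot, no lock taken
        return samplers.get(key, samplers[DEFAULT_KEY]).sample(span)

    def set_sample_rate_by_service(self, rate_by_service):
        new_samplers = {DEFAULT_KEY: self._by_service_samplers[DEFAULT_KEY]}
        for key, rate in rate_by_service.items():
            new_samplers[key] = RateSampler(rate)
        self._by_service_samplers = new_samplers  # single atomic reference swap
```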
</issue>
<code>
[start of ddtrace/sampler.py]
1 """Samplers manage the client-side trace sampling
2
3 Any `sampled = False` trace won't be written, and can be ignored by the instrumentation.
4 """
5 from threading import Lock
6
7 from .compat import iteritems
8 from .internal.logger import get_logger
9
10 log = get_logger(__name__)
11
12 MAX_TRACE_ID = 2 ** 64
13
14 # Has to be the same factor and key as the Agent to allow chained sampling
15 KNUTH_FACTOR = 1111111111111111111
16
17
18 class AllSampler(object):
19 """Sampler sampling all the traces"""
20
21 def sample(self, span):
22 return True
23
24
25 class RateSampler(object):
26 """Sampler based on a rate
27
28 Keep (100 * `sample_rate`)% of the traces.
29 It samples randomly, its main purpose is to reduce the instrumentation footprint.
30 """
31
32 def __init__(self, sample_rate=1):
33 if sample_rate <= 0:
34 log.error('sample_rate is negative or null, disable the Sampler')
35 sample_rate = 1
36 elif sample_rate > 1:
37 sample_rate = 1
38
39 self.set_sample_rate(sample_rate)
40
41 log.debug('initialized RateSampler, sample %s%% of traces', 100 * sample_rate)
42
43 def set_sample_rate(self, sample_rate):
44 self.sample_rate = sample_rate
45 self.sampling_id_threshold = sample_rate * MAX_TRACE_ID
46
47 def sample(self, span):
48 sampled = ((span.trace_id * KNUTH_FACTOR) % MAX_TRACE_ID) <= self.sampling_id_threshold
49
50 return sampled
51
52
53 def _key(service=None, env=None):
54 service = service or ''
55 env = env or ''
56 return 'service:' + service + ',env:' + env
57
58
59 _default_key = _key()
60
61
62 class RateByServiceSampler(object):
63 """Sampler based on a rate, by service
64
65 Keep (100 * `sample_rate`)% of the traces.
66 The sample rate is kept independently for each service/env tuple.
67 """
68
69 def __init__(self, sample_rate=1):
70 self._lock = Lock()
71 self._by_service_samplers = {}
72 self._by_service_samplers[_default_key] = RateSampler(sample_rate)
73
74 def _set_sample_rate_by_key(self, sample_rate, key):
75 with self._lock:
76 if key in self._by_service_samplers:
77 self._by_service_samplers[key].set_sample_rate(sample_rate)
78 else:
79 self._by_service_samplers[key] = RateSampler(sample_rate)
80
81 def set_sample_rate(self, sample_rate, service='', env=''):
82 self._set_sample_rate_by_key(sample_rate, _key(service, env))
83
84 def sample(self, span):
85 tags = span.tracer().tags
86 env = tags['env'] if 'env' in tags else None
87 key = _key(span.service, env)
88 with self._lock:
89 if key in self._by_service_samplers:
90 return self._by_service_samplers[key].sample(span)
91 return self._by_service_samplers[_default_key].sample(span)
92
93 def set_sample_rate_by_service(self, rate_by_service):
94 for key, sample_rate in iteritems(rate_by_service):
95 self._set_sample_rate_by_key(sample_rate, key)
96 with self._lock:
97 for key in list(self._by_service_samplers):
98 if key not in rate_by_service and key != _default_key:
99 del self._by_service_samplers[key]
100
[end of ddtrace/sampler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ddtrace/sampler.py b/ddtrace/sampler.py
--- a/ddtrace/sampler.py
+++ b/ddtrace/sampler.py
@@ -2,8 +2,6 @@
Any `sampled = False` trace won't be written, and can be ignored by the instrumentation.
"""
-from threading import Lock
-
from .compat import iteritems
from .internal.logger import get_logger
@@ -50,15 +48,6 @@
return sampled
-def _key(service=None, env=None):
- service = service or ''
- env = env or ''
- return 'service:' + service + ',env:' + env
-
-
-_default_key = _key()
-
-
class RateByServiceSampler(object):
"""Sampler based on a rate, by service
@@ -66,34 +55,40 @@
The sample rate is kept independently for each service/env tuple.
"""
+ @staticmethod
+ def _key(service=None, env=None):
+ """Compute a key with the same format used by the Datadog agent API."""
+ service = service or ''
+ env = env or ''
+ return 'service:' + service + ',env:' + env
+
def __init__(self, sample_rate=1):
- self._lock = Lock()
- self._by_service_samplers = {}
- self._by_service_samplers[_default_key] = RateSampler(sample_rate)
+ self.sample_rate = sample_rate
+ self._by_service_samplers = self._get_new_by_service_sampler()
- def _set_sample_rate_by_key(self, sample_rate, key):
- with self._lock:
- if key in self._by_service_samplers:
- self._by_service_samplers[key].set_sample_rate(sample_rate)
- else:
- self._by_service_samplers[key] = RateSampler(sample_rate)
+ def _get_new_by_service_sampler(self):
+ return {
+ self._default_key: RateSampler(self.sample_rate)
+ }
def set_sample_rate(self, sample_rate, service='', env=''):
- self._set_sample_rate_by_key(sample_rate, _key(service, env))
+ self._by_service_samplers[self._key(service, env)] = RateSampler(sample_rate)
def sample(self, span):
tags = span.tracer().tags
env = tags['env'] if 'env' in tags else None
- key = _key(span.service, env)
- with self._lock:
- if key in self._by_service_samplers:
- return self._by_service_samplers[key].sample(span)
- return self._by_service_samplers[_default_key].sample(span)
+ key = self._key(span.service, env)
+ return self._by_service_samplers.get(
+ key, self._by_service_samplers[self._default_key]
+ ).sample(span)
def set_sample_rate_by_service(self, rate_by_service):
+ new_by_service_samplers = self._get_new_by_service_sampler()
for key, sample_rate in iteritems(rate_by_service):
- self._set_sample_rate_by_key(sample_rate, key)
- with self._lock:
- for key in list(self._by_service_samplers):
- if key not in rate_by_service and key != _default_key:
- del self._by_service_samplers[key]
+ new_by_service_samplers[key] = RateSampler(sample_rate)
+
+ self._by_service_samplers = new_by_service_samplers
+
+
+# Default key for service with no specific rate
+RateByServiceSampler._default_key = RateByServiceSampler._key()
| {"golden_diff": "diff --git a/ddtrace/sampler.py b/ddtrace/sampler.py\n--- a/ddtrace/sampler.py\n+++ b/ddtrace/sampler.py\n@@ -2,8 +2,6 @@\n \n Any `sampled = False` trace won't be written, and can be ignored by the instrumentation.\n \"\"\"\n-from threading import Lock\n-\n from .compat import iteritems\n from .internal.logger import get_logger\n \n@@ -50,15 +48,6 @@\n return sampled\n \n \n-def _key(service=None, env=None):\n- service = service or ''\n- env = env or ''\n- return 'service:' + service + ',env:' + env\n-\n-\n-_default_key = _key()\n-\n-\n class RateByServiceSampler(object):\n \"\"\"Sampler based on a rate, by service\n \n@@ -66,34 +55,40 @@\n The sample rate is kept independently for each service/env tuple.\n \"\"\"\n \n+ @staticmethod\n+ def _key(service=None, env=None):\n+ \"\"\"Compute a key with the same format used by the Datadog agent API.\"\"\"\n+ service = service or ''\n+ env = env or ''\n+ return 'service:' + service + ',env:' + env\n+\n def __init__(self, sample_rate=1):\n- self._lock = Lock()\n- self._by_service_samplers = {}\n- self._by_service_samplers[_default_key] = RateSampler(sample_rate)\n+ self.sample_rate = sample_rate\n+ self._by_service_samplers = self._get_new_by_service_sampler()\n \n- def _set_sample_rate_by_key(self, sample_rate, key):\n- with self._lock:\n- if key in self._by_service_samplers:\n- self._by_service_samplers[key].set_sample_rate(sample_rate)\n- else:\n- self._by_service_samplers[key] = RateSampler(sample_rate)\n+ def _get_new_by_service_sampler(self):\n+ return {\n+ self._default_key: RateSampler(self.sample_rate)\n+ }\n \n def set_sample_rate(self, sample_rate, service='', env=''):\n- self._set_sample_rate_by_key(sample_rate, _key(service, env))\n+ self._by_service_samplers[self._key(service, env)] = RateSampler(sample_rate)\n \n def sample(self, span):\n tags = span.tracer().tags\n env = tags['env'] if 'env' in tags else None\n- key = _key(span.service, env)\n- with self._lock:\n- if key in self._by_service_samplers:\n- return self._by_service_samplers[key].sample(span)\n- return self._by_service_samplers[_default_key].sample(span)\n+ key = self._key(span.service, env)\n+ return self._by_service_samplers.get(\n+ key, self._by_service_samplers[self._default_key]\n+ ).sample(span)\n \n def set_sample_rate_by_service(self, rate_by_service):\n+ new_by_service_samplers = self._get_new_by_service_sampler()\n for key, sample_rate in iteritems(rate_by_service):\n- self._set_sample_rate_by_key(sample_rate, key)\n- with self._lock:\n- for key in list(self._by_service_samplers):\n- if key not in rate_by_service and key != _default_key:\n- del self._by_service_samplers[key]\n+ new_by_service_samplers[key] = RateSampler(sample_rate)\n+\n+ self._by_service_samplers = new_by_service_samplers\n+\n+\n+# Default key for service with no specific rate\n+RateByServiceSampler._default_key = RateByServiceSampler._key()\n", "issue": "Stuck in \"Waiting for GIL\"\nHi,\r\n\r\nWe found a thread that seems blocked forever:\r\n\r\n```\r\ngdb python 30107\r\n```\r\n\r\nHere's the single thread:\r\n\r\n```\r\n(gdb) info threads\r\n Id Target Id Frame\r\n* 1 Thread 0x7f1dd852e4c0 (LWP 30107) \"/opt/simon/rele\" 0x00007f1dd7d2e146 in do_futex_wait.constprop () from /lib64/libpthread.so.0\r\n```\r\n\r\nHere's the backtrace:\r\n\r\n```\r\nraceback (most recent call first):\r\n Waiting for the GIL\r\n File \"/opt/simon/releases/b038662ce7d140609b1e4883a3ea0edf77851537/venv/lib/python2.7/site-packages/ddtrace/sampler.py\", line 88, in sample\r\n with 
self._lock:\r\n File \"/opt/simon/releases/b038662ce7d140609b1e4883a3ea0edf77851537/venv/lib/python2.7/site-packages/ddtrace/tracer.py\", line 251, in start_span\r\n if self.priority_sampler.sample(span):\r\n File \"/opt/simon/releases/b038662ce7d140609b1e4883a3ea0edf77851537/venv/lib/python2.7/site-packages/ddtrace/tracer.py\", line 376, in trace\r\n span_type=span_type,\r\n File \"/opt/simon/releases/b038662ce7d140609b1e4883a3ea0edf77851537/venv/lib/python2.7/site-packages/ddtrace/contrib/django/cache.py\", line 56, in wrapped\r\n with tracer.trace('django.cache', span_type=TYPE, service=cache_service_name) as span:\r\n```\r\n\r\nI'm not sure how to troubleshoot from here. Have you seen this before, or have any suggestions on what I could look at? It's the only thread in process so nothing should be holding the GIL.\n", "before_files": [{"content": "\"\"\"Samplers manage the client-side trace sampling\n\nAny `sampled = False` trace won't be written, and can be ignored by the instrumentation.\n\"\"\"\nfrom threading import Lock\n\nfrom .compat import iteritems\nfrom .internal.logger import get_logger\n\nlog = get_logger(__name__)\n\nMAX_TRACE_ID = 2 ** 64\n\n# Has to be the same factor and key as the Agent to allow chained sampling\nKNUTH_FACTOR = 1111111111111111111\n\n\nclass AllSampler(object):\n \"\"\"Sampler sampling all the traces\"\"\"\n\n def sample(self, span):\n return True\n\n\nclass RateSampler(object):\n \"\"\"Sampler based on a rate\n\n Keep (100 * `sample_rate`)% of the traces.\n It samples randomly, its main purpose is to reduce the instrumentation footprint.\n \"\"\"\n\n def __init__(self, sample_rate=1):\n if sample_rate <= 0:\n log.error('sample_rate is negative or null, disable the Sampler')\n sample_rate = 1\n elif sample_rate > 1:\n sample_rate = 1\n\n self.set_sample_rate(sample_rate)\n\n log.debug('initialized RateSampler, sample %s%% of traces', 100 * sample_rate)\n\n def set_sample_rate(self, sample_rate):\n self.sample_rate = sample_rate\n self.sampling_id_threshold = sample_rate * MAX_TRACE_ID\n\n def sample(self, span):\n sampled = ((span.trace_id * KNUTH_FACTOR) % MAX_TRACE_ID) <= self.sampling_id_threshold\n\n return sampled\n\n\ndef _key(service=None, env=None):\n service = service or ''\n env = env or ''\n return 'service:' + service + ',env:' + env\n\n\n_default_key = _key()\n\n\nclass RateByServiceSampler(object):\n \"\"\"Sampler based on a rate, by service\n\n Keep (100 * `sample_rate`)% of the traces.\n The sample rate is kept independently for each service/env tuple.\n \"\"\"\n\n def __init__(self, sample_rate=1):\n self._lock = Lock()\n self._by_service_samplers = {}\n self._by_service_samplers[_default_key] = RateSampler(sample_rate)\n\n def _set_sample_rate_by_key(self, sample_rate, key):\n with self._lock:\n if key in self._by_service_samplers:\n self._by_service_samplers[key].set_sample_rate(sample_rate)\n else:\n self._by_service_samplers[key] = RateSampler(sample_rate)\n\n def set_sample_rate(self, sample_rate, service='', env=''):\n self._set_sample_rate_by_key(sample_rate, _key(service, env))\n\n def sample(self, span):\n tags = span.tracer().tags\n env = tags['env'] if 'env' in tags else None\n key = _key(span.service, env)\n with self._lock:\n if key in self._by_service_samplers:\n return self._by_service_samplers[key].sample(span)\n return self._by_service_samplers[_default_key].sample(span)\n\n def set_sample_rate_by_service(self, rate_by_service):\n for key, sample_rate in iteritems(rate_by_service):\n 
self._set_sample_rate_by_key(sample_rate, key)\n with self._lock:\n for key in list(self._by_service_samplers):\n if key not in rate_by_service and key != _default_key:\n del self._by_service_samplers[key]\n", "path": "ddtrace/sampler.py"}]} | 2,027 | 815 |
gh_patches_debug_10488 | rasdani/github-patches | git_diff | PlasmaPy__PlasmaPy-644 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add binder links to generated example notebooks
Stumbled upon at #638
* Binder is a cool tool I'd love to learn more about
* https://sphinx-gallery.github.io/configuration.html#binder-links is pretty cool
* https://github.com/mne-tools/mne-bids/pull/207/files is an implementation
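
Concretely, sphinx-gallery's binder integration is driven by a `binder` entry inside `sphinx_gallery_conf`. A sketch of what that could look like for this `conf.py` follows; the organisation, branch and dependencies file are placeholders to confirm against the repository layout:

```python
# Sketch of the sphinx-gallery binder hook, following
# https://sphinx-gallery.github.io/configuration.html#binder-links
sphinx_gallery_conf = {
    'examples_dirs': '../plasmapy/examples',
    'backreferences_dir': 'gen_modules/backreferences',
    'gallery_dirs': 'auto_examples',
    'binder': {
        'org': 'PlasmaPy',                        # GitHub org that hosts the docs
        'repo': 'PlasmaPy',                       # repository name
        'branch': 'master',                       # branch/tag/commit the docs are built from
        'binderhub_url': 'https://mybinder.org',  # any BinderHub deployment
        'dependencies': ['../requirements/environment.yml'],  # assumed requirements file
        'use_jupyter_lab': True,
    },
}
```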
</issue>
<code>
[start of docs/conf.py]
1 #!/usr/bin/env python3.6
2 # -*- coding: utf-8 -*-
3 #
4 # PlasmaPy documentation build configuration file, created by
5 # sphinx-quickstart on Wed May 31 18:16:46 2017.
6 #
7 # This file is execfile()d with the current directory set to its
8 # containing dir.
9 #
10 # Note that not all possible configuration values are present in this
11 # autogenerated file.
12 #
13 # All configuration values have a default; values that are commented out
14 # serve to show the default.
15
16 # If extensions (or modules to document with autodoc) are in another directory,
17 # add these directories to sys.path here. If the directory is relative to the
18 # documentation root, use os.path.abspath to make it absolute, like shown here.
19 #
20
21 import os
22 import sys
23
24 sys.path.insert(0, os.path.abspath('..'))
25
26 # -- General configuration ------------------------------------------------
27
28 # If your documentation needs a minimal Sphinx version, state it here.
29 #
30 # needs_sphinx = '1.0'
31
32 # Add any Sphinx extension module names here, as strings. They can be
33 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
34 # ones.
35 extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax',
36 'sphinx.ext.napoleon', 'sphinx.ext.intersphinx',
37 'sphinx_automodapi.automodapi',
38 'sphinx_automodapi.smart_resolver',
39 'sphinx_gallery.gen_gallery',
40 'sphinx.ext.graphviz']
41
42
43 intersphinx_mapping = {
44 'python': ('https://docs.python.org/3', None),
45 'numpy': ('https://docs.scipy.org/doc/numpy', None),
46 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
47 'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
48 'astropy': ('http://docs.astropy.org/en/stable/', None)}
49 # Add any paths that contain templates here, relative to this directory.
50 templates_path = ['_templates']
51
52 # The suffix(es) of source filenames.
53 # You can specify multiple suffix as a list of string:
54 #
55 # source_suffix = ['.rst', '.md']
56 source_suffix = '.rst'
57
58 # The master toctree document.
59 master_doc = 'index'
60
61 # General information about the project.
62 project = 'PlasmaPy'
63 copyright = '2015-2019, PlasmaPy Community'
64 author = 'PlasmaPy Community'
65
66 # The version info for the project you're documenting, acts as replacement for
67 # |version| and |release|, also used in various other places throughout the
68 # built documents.
69 #
70 # The short X.Y version.
71 version = '0.2'
72 # The full version, including alpha/beta/rc tags.
73 release = '0.2.0'
74
75 # The language for content autogenerated by Sphinx. Refer to documentation
76 # for a list of supported languages.
77 #
78 # This is also used if you do content translation via gettext catalogs.
79 # Usually you set "language" from the command line for these cases.
80 language = None
81
82 # List of patterns, relative to source directory, that match files and
83 # directories to ignore when looking for source files.
84 # This patterns also effect to html_static_path and html_extra_path
85 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
86
87 # The name of the Pygments (syntax highlighting) style to use.
88 pygments_style = 'sphinx'
89
90 # If true, `todo` and `todoList` produce output, else they produce nothing.
91 todo_include_todos = False
92
93 default_role = 'obj'
94
95 # -- Options for HTML output ----------------------------------------------
96
97 # The theme to use for HTML and HTML Help pages. See the documentation for
98 # a list of builtin themes.
99 #
100 # html_theme = 'alabaster'
101 # html_theme = 'traditional'
102 # html_theme = 'agogo'
103 html_theme = "sphinx_rtd_theme"
104
105 # Theme options are theme-specific and customize the look and feel of a theme
106 # further. For a list of options available for each theme, see the
107 # documentation.
108 #
109 # html_theme_options = {}
110
111 # Add any paths that contain custom static files (such as style sheets) here,
112 # relative to this directory. They are copied after the builtin static files,
113 # so a file named "default.css" will overwrite the builtin "default.css".
114 # html_static_path = ['_static']
115
116
117 # -- Options for HTMLHelp output ------------------------------------------
118
119 # Output file base name for HTML help builder.
120 htmlhelp_basename = 'PlasmaPydoc'
121
122
123 # -- Options for LaTeX output ---------------------------------------------
124
125 latex_elements = {
126 # The paper size ('letterpaper' or 'a4paper').
127 #
128 # 'papersize': 'letterpaper',
129
130 # The font size ('10pt', '11pt' or '12pt').
131 #
132 # 'pointsize': '10pt',
133
134 # Additional stuff for the LaTeX preamble.
135 #
136 # 'preamble': '',
137
138 # Latex figure (float) alignment
139 #
140 # 'figure_align': 'htbp',
141 }
142
143 # Grouping the document tree into LaTeX files. List of tuples
144 # (source start file, target name, title,
145 # author, documentclass [howto, manual, or own class]).
146 latex_documents = [
147 (master_doc, 'PlasmaPy.tex', 'PlasmaPy Documentation',
148 'PlasmaPy Community', 'manual'),
149 ]
150
151
152 # -- Options for manual page output ---------------------------------------
153
154 # One entry per manual page. List of tuples
155 # (source start file, name, description, authors, manual section).
156 man_pages = [
157 (master_doc, 'plasmapy', 'PlasmaPy Documentation',
158 [author], 1)
159 ]
160
161
162 # -- Options for Texinfo output -------------------------------------------
163
164 # Grouping the document tree into Texinfo files. List of tuples
165 # (source start file, target name, title, author,
166 # dir menu entry, description, category)
167 texinfo_documents = [
168 (master_doc, 'PlasmaPy', 'PlasmaPy Documentation',
169 author, 'PlasmaPy', 'Python package for plasma physics',
170 'Miscellaneous'),
171 ]
172
173 html_favicon = "./_static/icon.ico"
174
175 # -- Options for Sphinx Gallery -----------------
176
177 sphinx_gallery_conf = {
178 # path to your examples scripts
179 'examples_dirs': '../plasmapy/examples',
180 # path where to save gallery generated examples
181 'backreferences_dir': 'gen_modules/backreferences',
182 'gallery_dirs': 'auto_examples',
183 }
184
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -180,4 +180,15 @@
# path where to save gallery generated examples
'backreferences_dir': 'gen_modules/backreferences',
'gallery_dirs': 'auto_examples',
+ 'binder': {
+ # Required keys
+ 'org': 'PlasmaPy',
+ 'repo': 'PlasmaPy',
+ 'branch': 'master', # noqa: E501 Can be any branch, tag, or commit hash. Use a branch that hosts your docs.
+ 'binderhub_url': 'https://mybinder.org', # noqa: E501 Any URL of a binderhub deployment. Must be full URL (e.g. https://mybinder.org).
+ 'dependencies': [
+ '../requirements/environment.yml'
+ ],
+ 'use_jupyter_lab': True,
+ },
}
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -180,4 +180,15 @@\n # path where to save gallery generated examples\n 'backreferences_dir': 'gen_modules/backreferences',\n 'gallery_dirs': 'auto_examples',\n+ 'binder': {\n+ # Required keys\n+ 'org': 'PlasmaPy',\n+ 'repo': 'PlasmaPy',\n+ 'branch': 'master', # noqa: E501 Can be any branch, tag, or commit hash. Use a branch that hosts your docs.\n+ 'binderhub_url': 'https://mybinder.org', # noqa: E501 Any URL of a binderhub deployment. Must be full URL (e.g. https://mybinder.org).\n+ 'dependencies': [\n+ '../requirements/environment.yml'\n+ ],\n+ 'use_jupyter_lab': True,\n+ },\n }\n", "issue": "Add binder links to generated example notebooks\nStumbled upon at #638 \r\n\r\n* Binder is a cool tool I'd love to learn more about\r\n* https://sphinx-gallery.github.io/configuration.html#binder-links is pretty cool\r\n\t* https://github.com/mne-tools/mne-bids/pull/207/files is an implementation\n", "before_files": [{"content": "#!/usr/bin/env python3.6\n# -*- coding: utf-8 -*-\n#\n# PlasmaPy documentation build configuration file, created by\n# sphinx-quickstart on Wed May 31 18:16:46 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath('..'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon', 'sphinx.ext.intersphinx',\n 'sphinx_automodapi.automodapi',\n 'sphinx_automodapi.smart_resolver',\n 'sphinx_gallery.gen_gallery',\n 'sphinx.ext.graphviz']\n\n\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n 'numpy': ('https://docs.scipy.org/doc/numpy', None),\n 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),\n 'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),\n 'astropy': ('http://docs.astropy.org/en/stable/', None)}\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'PlasmaPy'\ncopyright = '2015-2019, PlasmaPy Community'\nauthor = 'PlasmaPy Community'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '0.2'\n# The full version, including alpha/beta/rc tags.\nrelease = '0.2.0'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\ndefault_role = 'obj'\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n# html_theme = 'alabaster'\n# html_theme = 'traditional'\n# html_theme = 'agogo'\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'PlasmaPydoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'PlasmaPy.tex', 'PlasmaPy Documentation',\n 'PlasmaPy Community', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'plasmapy', 'PlasmaPy Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'PlasmaPy', 'PlasmaPy Documentation',\n author, 'PlasmaPy', 'Python package for plasma physics',\n 'Miscellaneous'),\n]\n\nhtml_favicon = \"./_static/icon.ico\"\n\n# -- Options for Sphinx Gallery -----------------\n\nsphinx_gallery_conf = {\n # path to your examples scripts\n 'examples_dirs': '../plasmapy/examples',\n # path where to save gallery generated examples\n 'backreferences_dir': 'gen_modules/backreferences',\n 'gallery_dirs': 'auto_examples',\n}\n", "path": "docs/conf.py"}]} | 2,480 | 215 |
gh_patches_debug_32598 | rasdani/github-patches | git_diff | apache__airflow-15680 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MongoToS3Operator failed when running with a single query (not an aggregate pipeline)
**Apache Airflow version**: 2.0.2
**What happened**:
`MongoToS3Operator` failed when running with a single query (not an aggregate pipeline):
```sh
Traceback (most recent call last):
File "/home/airflow//bin/airflow", line 8, in <module>
sys.exit(main())
File "/home/airflow//lib/python3.8/site-packages/airflow/__main__.py", line 40, in main
args.func(args)
File "/home/airflow//lib/python3.8/site-packages/airflow/cli/cli_parser.py", line 48, in command
return func(*args, **kwargs)
File "/home/airflow//lib/python3.8/site-packages/airflow/utils/cli.py", line 89, in wrapper
return f(*args, **kwargs)
File "/home/airflow//lib/python3.8/site-packages/airflow/cli/commands/task_command.py", line 385, in task_test
ti.run(ignore_task_deps=True, ignore_ti_state=True, test_mode=True)
File "/home/airflow//lib/python3.8/site-packages/airflow/utils/session.py", line 70, in wrapper
return func(*args, session=session, **kwargs)
File "/home/airflow//lib/python3.8/site-packages/airflow/models/taskinstance.py", line 1413, in run
self._run_raw_task(
File "/home/airflow//lib/python3.8/site-packages/airflow/utils/session.py", line 67, in wrapper
return func(*args, **kwargs)
File "/home/airflow//lib/python3.8/site-packages/airflow/models/taskinstance.py", line 1138, in _run_raw_task
self._prepare_and_execute_task_with_callbacks(context, task)
File "/home/airflow//lib/python3.8/site-packages/airflow/models/taskinstance.py", line 1311, in _prepare_and_execute_task_with_callbacks
result = self._execute_task(context, task_copy)
File "/home/airflow//lib/python3.8/site-packages/airflow/models/taskinstance.py", line 1341, in _execute_task
result = task_copy.execute(context=context)
File "/home/airflow//lib/python3.8/site-packages/airflow/providers/amazon/aws/transfers/mongo_to_s3.py", line 116, in execute
results = MongoHook(self.mongo_conn_id).find(
File "/home/airflow//lib/python3.8/site-packages/airflow/providers/mongo/hooks/mongo.py", line 144, in find
return collection.find(query, **kwargs)
File "/home/airflow//lib/python3.8/site-packages/pymongo/collection.py", line 1523, in find
return Cursor(self, *args, **kwargs)
TypeError: __init__() got an unexpected keyword argument 'allowDiskUse'
```
**What you expected to happen**:
I expect the data from MongoDB to be exported to a file in S3 with no errors.
**How to reproduce it**:
Run the following operator with a single `mongo_query` (no aggregate pipeline):
```python
export_to_s3 = MongoToS3Operator(
task_id='export_to_s3',
mongo_conn_id=Variable.get('mongo_conn_id'),
s3_conn_id=Variable.get('aws_conn_id'),
mongo_collection='my_mongo_collection',
mongo_query={},
s3_bucket=Variable.get('s3_bucket'),
s3_key="my_data.json",
replace=True,
dag=dag,
)
```
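For context, the failure boils down to pymongo's API: `Collection.find()` takes a plain filter document, while `allowDiskUse` is an `aggregate()` option, so it cannot simply be forwarded to both calls. A minimal sketch outside Airflow (database/collection names are placeholders and assume a reachable MongoDB):

```python
from pymongo import MongoClient

collection = MongoClient()["my_db"]["my_mongo_collection"]

# Plain query: find() expects a filter document (plus projection, etc.).
docs = list(collection.find({}))

# Aggregate pipeline: allowDiskUse is a valid option here.
results = list(collection.aggregate([{"$match": {}}], allowDiskUse=True))

# collection.find({}, allowDiskUse=True) raises the TypeError shown in the traceback above.
```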
</issue>
<code>
[start of airflow/providers/amazon/aws/transfers/mongo_to_s3.py]
1 #
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18 import json
19 import warnings
20 from typing import Any, Iterable, Optional, Union, cast
21
22 from bson import json_util
23
24 from airflow.models import BaseOperator
25 from airflow.providers.amazon.aws.hooks.s3 import S3Hook
26 from airflow.providers.mongo.hooks.mongo import MongoHook
27
28 _DEPRECATION_MSG = (
29 "The s3_conn_id parameter has been deprecated. You should pass instead the aws_conn_id parameter."
30 )
31
32
33 class MongoToS3Operator(BaseOperator):
34 """Operator meant to move data from mongo via pymongo to s3 via boto.
35
36 :param mongo_conn_id: reference to a specific mongo connection
37 :type mongo_conn_id: str
38 :param aws_conn_id: reference to a specific S3 connection
39 :type aws_conn_id: str
40 :param mongo_collection: reference to a specific collection in your mongo db
41 :type mongo_collection: str
42 :param mongo_query: query to execute. A list including a dict of the query
43 :type mongo_query: list
44 :param s3_bucket: reference to a specific S3 bucket to store the data
45 :type s3_bucket: str
46 :param s3_key: in which S3 key the file will be stored
47 :type s3_key: str
48 :param mongo_db: reference to a specific mongo database
49 :type mongo_db: str
50 :param replace: whether or not to replace the file in S3 if it previously existed
51 :type replace: bool
52 :param allow_disk_use: in the case you are retrieving a lot of data, you may have
53 to use the disk to save it instead of saving all in the RAM
54 :type allow_disk_use: bool
55 :param compression: type of compression to use for output file in S3. Currently only gzip is supported.
56 :type compression: str
57 """
58
59 template_fields = ('s3_bucket', 's3_key', 'mongo_query', 'mongo_collection')
60 ui_color = '#589636'
61 template_fields_renderers = {"mongo_query": "py"}
62 # pylint: disable=too-many-instance-attributes
63
64 def __init__(
65 self,
66 *,
67 s3_conn_id: Optional[str] = None,
68 mongo_conn_id: str = 'mongo_default',
69 aws_conn_id: str = 'aws_default',
70 mongo_collection: str,
71 mongo_query: Union[list, dict],
72 s3_bucket: str,
73 s3_key: str,
74 mongo_db: Optional[str] = None,
75 replace: bool = False,
76 allow_disk_use: bool = False,
77 compression: Optional[str] = None,
78 **kwargs,
79 ) -> None:
80 super().__init__(**kwargs)
81 if s3_conn_id:
82 warnings.warn(_DEPRECATION_MSG, DeprecationWarning, stacklevel=3)
83 aws_conn_id = s3_conn_id
84
85 self.mongo_conn_id = mongo_conn_id
86 self.aws_conn_id = aws_conn_id
87 self.mongo_db = mongo_db
88 self.mongo_collection = mongo_collection
89
90 # Grab query and determine if we need to run an aggregate pipeline
91 self.mongo_query = mongo_query
92 self.is_pipeline = isinstance(self.mongo_query, list)
93
94 self.s3_bucket = s3_bucket
95 self.s3_key = s3_key
96 self.replace = replace
97 self.allow_disk_use = allow_disk_use
98 self.compression = compression
99
100 def execute(self, context) -> bool:
101 """Is written to depend on transform method"""
102 s3_conn = S3Hook(self.aws_conn_id)
103
104 # Grab collection and execute query according to whether or not it is a pipeline
105 if self.is_pipeline:
106 results = MongoHook(self.mongo_conn_id).aggregate(
107 mongo_collection=self.mongo_collection,
108 aggregate_query=cast(list, self.mongo_query),
109 mongo_db=self.mongo_db,
110 allowDiskUse=self.allow_disk_use,
111 )
112
113 else:
114 results = MongoHook(self.mongo_conn_id).find(
115 mongo_collection=self.mongo_collection,
116 query=cast(dict, self.mongo_query),
117 mongo_db=self.mongo_db,
118 allowDiskUse=self.allow_disk_use,
119 )
120
121 # Performs transform then stringifies the docs results into json format
122 docs_str = self._stringify(self.transform(results))
123
124 s3_conn.load_string(
125 string_data=docs_str,
126 key=self.s3_key,
127 bucket_name=self.s3_bucket,
128 replace=self.replace,
129 compression=self.compression,
130 )
131
132 @staticmethod
133 def _stringify(iterable: Iterable, joinable: str = '\n') -> str:
134 """
135 Takes an iterable (pymongo Cursor or Array) containing dictionaries and
136 returns a stringified version using python join
137 """
138 return joinable.join([json.dumps(doc, default=json_util.default) for doc in iterable])
139
140 @staticmethod
141 def transform(docs: Any) -> Any:
142 """This method is meant to be extended by child classes
143 to perform transformations unique to those operators needs.
144 Processes pyMongo cursor and returns an iterable with each element being
145 a JSON serializable dictionary
146
147 Base transform() assumes no processing is needed
148 ie. docs is a pyMongo cursor of documents and cursor just
149 needs to be passed through
150
151 Override this method for custom transformations
152 """
153 return docs
154
[end of airflow/providers/amazon/aws/transfers/mongo_to_s3.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/airflow/providers/amazon/aws/transfers/mongo_to_s3.py b/airflow/providers/amazon/aws/transfers/mongo_to_s3.py
--- a/airflow/providers/amazon/aws/transfers/mongo_to_s3.py
+++ b/airflow/providers/amazon/aws/transfers/mongo_to_s3.py
@@ -40,7 +40,7 @@
:param mongo_collection: reference to a specific collection in your mongo db
:type mongo_collection: str
:param mongo_query: query to execute. A list including a dict of the query
- :type mongo_query: list
+ :type mongo_query: Union[list, dict]
:param s3_bucket: reference to a specific S3 bucket to store the data
:type s3_bucket: str
:param s3_key: in which S3 key the file will be stored
@@ -49,8 +49,8 @@
:type mongo_db: str
:param replace: whether or not to replace the file in S3 if it previously existed
:type replace: bool
- :param allow_disk_use: in the case you are retrieving a lot of data, you may have
- to use the disk to save it instead of saving all in the RAM
+ :param allow_disk_use: enables writing to temporary files in the case you are handling large dataset.
+ This only takes effect when `mongo_query` is a list - running an aggregate pipeline
:type allow_disk_use: bool
:param compression: type of compression to use for output file in S3. Currently only gzip is supported.
:type compression: str
@@ -115,7 +115,6 @@
mongo_collection=self.mongo_collection,
query=cast(dict, self.mongo_query),
mongo_db=self.mongo_db,
- allowDiskUse=self.allow_disk_use,
)
# Performs transform then stringifies the docs results into json format
| {"golden_diff": "diff --git a/airflow/providers/amazon/aws/transfers/mongo_to_s3.py b/airflow/providers/amazon/aws/transfers/mongo_to_s3.py\n--- a/airflow/providers/amazon/aws/transfers/mongo_to_s3.py\n+++ b/airflow/providers/amazon/aws/transfers/mongo_to_s3.py\n@@ -40,7 +40,7 @@\n :param mongo_collection: reference to a specific collection in your mongo db\n :type mongo_collection: str\n :param mongo_query: query to execute. A list including a dict of the query\n- :type mongo_query: list\n+ :type mongo_query: Union[list, dict]\n :param s3_bucket: reference to a specific S3 bucket to store the data\n :type s3_bucket: str\n :param s3_key: in which S3 key the file will be stored\n@@ -49,8 +49,8 @@\n :type mongo_db: str\n :param replace: whether or not to replace the file in S3 if it previously existed\n :type replace: bool\n- :param allow_disk_use: in the case you are retrieving a lot of data, you may have\n- to use the disk to save it instead of saving all in the RAM\n+ :param allow_disk_use: enables writing to temporary files in the case you are handling large dataset.\n+ This only takes effect when `mongo_query` is a list - running an aggregate pipeline\n :type allow_disk_use: bool\n :param compression: type of compression to use for output file in S3. Currently only gzip is supported.\n :type compression: str\n@@ -115,7 +115,6 @@\n mongo_collection=self.mongo_collection,\n query=cast(dict, self.mongo_query),\n mongo_db=self.mongo_db,\n- allowDiskUse=self.allow_disk_use,\n )\n \n # Performs transform then stringifies the docs results into json format\n", "issue": "MongoToS3Operator failed when running with a single query (not aggregate pipeline)\n**Apache Airflow version**: 2.0.2\r\n\r\n**What happened**:\r\n\r\n`MongoToS3Operator` failed when running with a single query (not aggregate pipeline):\r\n\r\n```sh\r\nTraceback (most recent call last):\r\n File \"/home/airflow//bin/airflow\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/home/airflow//lib/python3.8/site-packages/airflow/__main__.py\", line 40, in main\r\n args.func(args)\r\n File \"/home/airflow//lib/python3.8/site-packages/airflow/cli/cli_parser.py\", line 48, in command\r\n return func(*args, **kwargs)\r\n File \"/home/airflow//lib/python3.8/site-packages/airflow/utils/cli.py\", line 89, in wrapper\r\n return f(*args, **kwargs)\r\n File \"/home/airflow//lib/python3.8/site-packages/airflow/cli/commands/task_command.py\", line 385, in task_test\r\n ti.run(ignore_task_deps=True, ignore_ti_state=True, test_mode=True)\r\n File \"/home/airflow//lib/python3.8/site-packages/airflow/utils/session.py\", line 70, in wrapper\r\n return func(*args, session=session, **kwargs)\r\n File \"/home/airflow//lib/python3.8/site-packages/airflow/models/taskinstance.py\", line 1413, in run\r\n self._run_raw_task(\r\n File \"/home/airflow//lib/python3.8/site-packages/airflow/utils/session.py\", line 67, in wrapper\r\n return func(*args, **kwargs)\r\n File \"/home/airflow//lib/python3.8/site-packages/airflow/models/taskinstance.py\", line 1138, in _run_raw_task\r\n self._prepare_and_execute_task_with_callbacks(context, task)\r\n File \"/home/airflow//lib/python3.8/site-packages/airflow/models/taskinstance.py\", line 1311, in _prepare_and_execute_task_with_callbacks\r\n result = self._execute_task(context, task_copy)\r\n File \"/home/airflow//lib/python3.8/site-packages/airflow/models/taskinstance.py\", line 1341, in _execute_task\r\n result = task_copy.execute(context=context)\r\n File 
\"/home/airflow//lib/python3.8/site-packages/airflow/providers/amazon/aws/transfers/mongo_to_s3.py\", line 116, in execute\r\n results = MongoHook(self.mongo_conn_id).find(\r\n File \"/home/airflow//lib/python3.8/site-packages/airflow/providers/mongo/hooks/mongo.py\", line 144, in find\r\n return collection.find(query, **kwargs)\r\n File \"/home/airflow//lib/python3.8/site-packages/pymongo/collection.py\", line 1523, in find\r\n return Cursor(self, *args, **kwargs)\r\nTypeError: __init__() got an unexpected keyword argument 'allowDiskUse'\r\n```\r\n\r\n**What you expected to happen**:\r\n\r\nI expect the data from MongoDB to be exported to a file in S3 with no errors.\r\n\r\n**How to reproduce it**:\r\n\r\nRun the following operator with a single `mongo_query` (no aggregate pipeline):\r\n\r\n```python\r\nexport_to_s3 = MongoToS3Operator(\r\n task_id='export_to_s3',\r\n mongo_conn_id=Variable.get('mongo_conn_id'),\r\n s3_conn_id=Variable.get('aws_conn_id'),\r\n mongo_collection='my_mongo_collection',\r\n mongo_query={},\r\n s3_bucket=Variable.get('s3_bucket'),\r\n s3_key=\"my_data.json\",\r\n replace=True,\r\n dag=dag,\r\n)\r\n```\n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport json\nimport warnings\nfrom typing import Any, Iterable, Optional, Union, cast\n\nfrom bson import json_util\n\nfrom airflow.models import BaseOperator\nfrom airflow.providers.amazon.aws.hooks.s3 import S3Hook\nfrom airflow.providers.mongo.hooks.mongo import MongoHook\n\n_DEPRECATION_MSG = (\n \"The s3_conn_id parameter has been deprecated. You should pass instead the aws_conn_id parameter.\"\n)\n\n\nclass MongoToS3Operator(BaseOperator):\n \"\"\"Operator meant to move data from mongo via pymongo to s3 via boto.\n\n :param mongo_conn_id: reference to a specific mongo connection\n :type mongo_conn_id: str\n :param aws_conn_id: reference to a specific S3 connection\n :type aws_conn_id: str\n :param mongo_collection: reference to a specific collection in your mongo db\n :type mongo_collection: str\n :param mongo_query: query to execute. A list including a dict of the query\n :type mongo_query: list\n :param s3_bucket: reference to a specific S3 bucket to store the data\n :type s3_bucket: str\n :param s3_key: in which S3 key the file will be stored\n :type s3_key: str\n :param mongo_db: reference to a specific mongo database\n :type mongo_db: str\n :param replace: whether or not to replace the file in S3 if it previously existed\n :type replace: bool\n :param allow_disk_use: in the case you are retrieving a lot of data, you may have\n to use the disk to save it instead of saving all in the RAM\n :type allow_disk_use: bool\n :param compression: type of compression to use for output file in S3. 
Currently only gzip is supported.\n :type compression: str\n \"\"\"\n\n template_fields = ('s3_bucket', 's3_key', 'mongo_query', 'mongo_collection')\n ui_color = '#589636'\n template_fields_renderers = {\"mongo_query\": \"py\"}\n # pylint: disable=too-many-instance-attributes\n\n def __init__(\n self,\n *,\n s3_conn_id: Optional[str] = None,\n mongo_conn_id: str = 'mongo_default',\n aws_conn_id: str = 'aws_default',\n mongo_collection: str,\n mongo_query: Union[list, dict],\n s3_bucket: str,\n s3_key: str,\n mongo_db: Optional[str] = None,\n replace: bool = False,\n allow_disk_use: bool = False,\n compression: Optional[str] = None,\n **kwargs,\n ) -> None:\n super().__init__(**kwargs)\n if s3_conn_id:\n warnings.warn(_DEPRECATION_MSG, DeprecationWarning, stacklevel=3)\n aws_conn_id = s3_conn_id\n\n self.mongo_conn_id = mongo_conn_id\n self.aws_conn_id = aws_conn_id\n self.mongo_db = mongo_db\n self.mongo_collection = mongo_collection\n\n # Grab query and determine if we need to run an aggregate pipeline\n self.mongo_query = mongo_query\n self.is_pipeline = isinstance(self.mongo_query, list)\n\n self.s3_bucket = s3_bucket\n self.s3_key = s3_key\n self.replace = replace\n self.allow_disk_use = allow_disk_use\n self.compression = compression\n\n def execute(self, context) -> bool:\n \"\"\"Is written to depend on transform method\"\"\"\n s3_conn = S3Hook(self.aws_conn_id)\n\n # Grab collection and execute query according to whether or not it is a pipeline\n if self.is_pipeline:\n results = MongoHook(self.mongo_conn_id).aggregate(\n mongo_collection=self.mongo_collection,\n aggregate_query=cast(list, self.mongo_query),\n mongo_db=self.mongo_db,\n allowDiskUse=self.allow_disk_use,\n )\n\n else:\n results = MongoHook(self.mongo_conn_id).find(\n mongo_collection=self.mongo_collection,\n query=cast(dict, self.mongo_query),\n mongo_db=self.mongo_db,\n allowDiskUse=self.allow_disk_use,\n )\n\n # Performs transform then stringifies the docs results into json format\n docs_str = self._stringify(self.transform(results))\n\n s3_conn.load_string(\n string_data=docs_str,\n key=self.s3_key,\n bucket_name=self.s3_bucket,\n replace=self.replace,\n compression=self.compression,\n )\n\n @staticmethod\n def _stringify(iterable: Iterable, joinable: str = '\\n') -> str:\n \"\"\"\n Takes an iterable (pymongo Cursor or Array) containing dictionaries and\n returns a stringified version using python join\n \"\"\"\n return joinable.join([json.dumps(doc, default=json_util.default) for doc in iterable])\n\n @staticmethod\n def transform(docs: Any) -> Any:\n \"\"\"This method is meant to be extended by child classes\n to perform transformations unique to those operators needs.\n Processes pyMongo cursor and returns an iterable with each element being\n a JSON serializable dictionary\n\n Base transform() assumes no processing is needed\n ie. docs is a pyMongo cursor of documents and cursor just\n needs to be passed through\n\n Override this method for custom transformations\n \"\"\"\n return docs\n", "path": "airflow/providers/amazon/aws/transfers/mongo_to_s3.py"}]} | 3,053 | 427 |
gh_patches_debug_27527 | rasdani/github-patches | git_diff | getredash__redash-2503 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Athena query runner doesn't paginate schemas
<!--
#####################################################################
#
# Need support? USE THE FORUM! https://discuss.redash.io/c/support.
#
# Don't have steps to reproduce and actually not sure it's a bug?
# Use the forum! https://discuss.redash.io/c/support.
#
#####################################################################
**Got an idea for a new feature?** Check if it isn't on the roadmap already: http://bit.ly/redash-roadmap and start a new discussion in the features category: https://discuss.redash.io/c/feature-requests 🌟.
Found a bug? Please fill out the sections below... thank you 👍
-->
### Issue Summary
Hi,
AWS Athena query runner paginates only tables but not schemas.
We have ~170 schemas; however, only the first 100 are shown in the schema browser.
### Steps to Reproduce
1. Connect to Athena with Glue metastore which has more than 100 schemas
2. In the new query window, only 100 schemas are shown.
I've looked into the code, and it looks like you paginate only on tables, not on schemas (https://github.com/getredash/redash/blob/v4.0.0/redash/query_runner/athena.py#L130).
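For what it's worth, boto3 also exposes a paginator for `get_databases`, so the same pattern already used for tables could cover schemas as well — a rough sketch (the region name is just a placeholder):

```python
import boto3

client = boto3.client("glue", region_name="us-east-1")
database_paginator = client.get_paginator("get_databases")
table_paginator = client.get_paginator("get_tables")

for page in database_paginator.paginate():
    for database in page["DatabaseList"]:
        iterator = table_paginator.paginate(DatabaseName=database["Name"])
        for table in iterator.search("TableList[]"):
            print(database["Name"], table["Name"])
```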
### Technical details:
* Redash Version: 4.0.0
* Browser/OS:
* How did you install Redash: docker
Kind regards,
Ievgen.
</issue>
<code>
[start of redash/query_runner/athena.py]
1 import logging
2 import os
3
4 from redash.query_runner import *
5 from redash.settings import parse_boolean
6 from redash.utils import json_dumps, json_loads
7
8 logger = logging.getLogger(__name__)
9 ANNOTATE_QUERY = parse_boolean(os.environ.get('ATHENA_ANNOTATE_QUERY', 'true'))
10 SHOW_EXTRA_SETTINGS = parse_boolean(os.environ.get('ATHENA_SHOW_EXTRA_SETTINGS', 'true'))
11 OPTIONAL_CREDENTIALS = parse_boolean(os.environ.get('ATHENA_OPTIONAL_CREDENTIALS', 'true'))
12
13 try:
14 import pyathena
15 import boto3
16 enabled = True
17 except ImportError:
18 enabled = False
19
20
21 _TYPE_MAPPINGS = {
22 'boolean': TYPE_BOOLEAN,
23 'tinyint': TYPE_INTEGER,
24 'smallint': TYPE_INTEGER,
25 'integer': TYPE_INTEGER,
26 'bigint': TYPE_INTEGER,
27 'double': TYPE_FLOAT,
28 'varchar': TYPE_STRING,
29 'timestamp': TYPE_DATETIME,
30 'date': TYPE_DATE,
31 'varbinary': TYPE_STRING,
32 'array': TYPE_STRING,
33 'map': TYPE_STRING,
34 'row': TYPE_STRING,
35 'decimal': TYPE_FLOAT,
36 }
37
38
39 class SimpleFormatter(object):
40 def format(self, operation, parameters=None):
41 return operation
42
43
44 class Athena(BaseQueryRunner):
45 noop_query = 'SELECT 1'
46
47 @classmethod
48 def name(cls):
49 return "Amazon Athena"
50
51 @classmethod
52 def configuration_schema(cls):
53 schema = {
54 'type': 'object',
55 'properties': {
56 'region': {
57 'type': 'string',
58 'title': 'AWS Region'
59 },
60 'aws_access_key': {
61 'type': 'string',
62 'title': 'AWS Access Key'
63 },
64 'aws_secret_key': {
65 'type': 'string',
66 'title': 'AWS Secret Key'
67 },
68 's3_staging_dir': {
69 'type': 'string',
70 'title': 'S3 Staging (Query Results) Bucket Path'
71 },
72 'schema': {
73 'type': 'string',
74 'title': 'Schema Name',
75 'default': 'default'
76 },
77 'glue': {
78 'type': 'boolean',
79 'title': 'Use Glue Data Catalog',
80 },
81 },
82 'required': ['region', 's3_staging_dir'],
83 'order': ['region', 'aws_access_key', 'aws_secret_key', 's3_staging_dir', 'schema'],
84 'secret': ['aws_secret_key']
85 }
86
87 if SHOW_EXTRA_SETTINGS:
88 schema['properties'].update({
89 'encryption_option': {
90 'type': 'string',
91 'title': 'Encryption Option',
92 },
93 'kms_key': {
94 'type': 'string',
95 'title': 'KMS Key',
96 },
97 })
98
99 if not OPTIONAL_CREDENTIALS:
100 schema['required'] += ['aws_access_key', 'aws_secret_key']
101
102 return schema
103
104 @classmethod
105 def enabled(cls):
106 return enabled
107
108 @classmethod
109 def annotate_query(cls):
110 return ANNOTATE_QUERY
111
112 @classmethod
113 def type(cls):
114 return "athena"
115
116 def __get_schema_from_glue(self):
117 client = boto3.client(
118 'glue',
119 aws_access_key_id=self.configuration.get('aws_access_key', None),
120 aws_secret_access_key=self.configuration.get('aws_secret_key', None),
121 region_name=self.configuration['region']
122 )
123 schema = {}
124 paginator = client.get_paginator('get_tables')
125
126 for database in client.get_databases()['DatabaseList']:
127 iterator = paginator.paginate(DatabaseName=database['Name'])
128 for table in iterator.search('TableList[]'):
129 table_name = '%s.%s' % (database['Name'], table['Name'])
130 if table_name not in schema:
131 column = [columns['Name'] for columns in table['StorageDescriptor']['Columns']]
132 schema[table_name] = {'name': table_name, 'columns': column}
133 for partition in table.get('PartitionKeys', []):
134 schema[table_name]['columns'].append(partition['Name'])
135
136 return schema.values()
137
138 def get_schema(self, get_stats=False):
139 if self.configuration.get('glue', False):
140 return self.__get_schema_from_glue()
141
142 schema = {}
143 query = """
144 SELECT table_schema, table_name, column_name
145 FROM information_schema.columns
146 WHERE table_schema NOT IN ('information_schema')
147 """
148
149 results, error = self.run_query(query, None)
150 if error is not None:
151 raise Exception("Failed getting schema.")
152
153 results = json_loads(results)
154 for row in results['rows']:
155 table_name = '{0}.{1}'.format(row['table_schema'], row['table_name'])
156 if table_name not in schema:
157 schema[table_name] = {'name': table_name, 'columns': []}
158 schema[table_name]['columns'].append(row['column_name'])
159
160 return schema.values()
161
162 def run_query(self, query, user):
163 cursor = pyathena.connect(
164 s3_staging_dir=self.configuration['s3_staging_dir'],
165 region_name=self.configuration['region'],
166 aws_access_key_id=self.configuration.get('aws_access_key', None),
167 aws_secret_access_key=self.configuration.get('aws_secret_key', None),
168 schema_name=self.configuration.get('schema', 'default'),
169 encryption_option=self.configuration.get('encryption_option', None),
170 kms_key=self.configuration.get('kms_key', None),
171 formatter=SimpleFormatter()).cursor()
172
173 try:
174 cursor.execute(query)
175 column_tuples = [(i[0], _TYPE_MAPPINGS.get(i[1], None)) for i in cursor.description]
176 columns = self.fetch_columns(column_tuples)
177 rows = [dict(zip(([c['name'] for c in columns]), r)) for i, r in enumerate(cursor.fetchall())]
178 qbytes = None
179 athena_query_id = None
180 try:
181 qbytes = cursor.data_scanned_in_bytes
182 except AttributeError as e:
183 logger.debug("Athena Upstream can't get data_scanned_in_bytes: %s", e)
184 try:
185 athena_query_id = cursor.query_id
186 except AttributeError as e:
187 logger.debug("Athena Upstream can't get query_id: %s", e)
188 data = {
189 'columns': columns,
190 'rows': rows,
191 'metadata': {
192 'data_scanned': qbytes,
193 'athena_query_id': athena_query_id
194 }
195 }
196 json_data = json_dumps(data, ignore_nan=True)
197 error = None
198 except KeyboardInterrupt:
199 if cursor.query_id:
200 cursor.cancel()
201 error = "Query cancelled by user."
202 json_data = None
203 except Exception as ex:
204 if cursor.query_id:
205 cursor.cancel()
206 error = ex.message
207 json_data = None
208
209 return json_data, error
210
211
212 register(Athena)
213
[end of redash/query_runner/athena.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redash/query_runner/athena.py b/redash/query_runner/athena.py
--- a/redash/query_runner/athena.py
+++ b/redash/query_runner/athena.py
@@ -121,18 +121,20 @@
region_name=self.configuration['region']
)
schema = {}
- paginator = client.get_paginator('get_tables')
-
- for database in client.get_databases()['DatabaseList']:
- iterator = paginator.paginate(DatabaseName=database['Name'])
- for table in iterator.search('TableList[]'):
- table_name = '%s.%s' % (database['Name'], table['Name'])
- if table_name not in schema:
- column = [columns['Name'] for columns in table['StorageDescriptor']['Columns']]
- schema[table_name] = {'name': table_name, 'columns': column}
- for partition in table.get('PartitionKeys', []):
- schema[table_name]['columns'].append(partition['Name'])
+ database_paginator = client.get_paginator('get_databases')
+ table_paginator = client.get_paginator('get_tables')
+
+ for databases in database_paginator.paginate():
+ for database in databases['DatabaseList']:
+ iterator = table_paginator.paginate(DatabaseName=database['Name'])
+ for table in iterator.search('TableList[]'):
+ table_name = '%s.%s' % (database['Name'], table['Name'])
+ if table_name not in schema:
+ column = [columns['Name'] for columns in table['StorageDescriptor']['Columns']]
+ schema[table_name] = {'name': table_name, 'columns': column}
+ for partition in table.get('PartitionKeys', []):
+ schema[table_name]['columns'].append(partition['Name'])
return schema.values()
def get_schema(self, get_stats=False):
| {"golden_diff": "diff --git a/redash/query_runner/athena.py b/redash/query_runner/athena.py\n--- a/redash/query_runner/athena.py\n+++ b/redash/query_runner/athena.py\n@@ -121,18 +121,20 @@\n region_name=self.configuration['region']\n )\n schema = {}\n- paginator = client.get_paginator('get_tables')\n-\n- for database in client.get_databases()['DatabaseList']:\n- iterator = paginator.paginate(DatabaseName=database['Name'])\n- for table in iterator.search('TableList[]'):\n- table_name = '%s.%s' % (database['Name'], table['Name'])\n- if table_name not in schema:\n- column = [columns['Name'] for columns in table['StorageDescriptor']['Columns']]\n- schema[table_name] = {'name': table_name, 'columns': column}\n- for partition in table.get('PartitionKeys', []):\n- schema[table_name]['columns'].append(partition['Name'])\n \n+ database_paginator = client.get_paginator('get_databases')\n+ table_paginator = client.get_paginator('get_tables')\n+\n+ for databases in database_paginator.paginate():\n+ for database in databases['DatabaseList']:\n+ iterator = table_paginator.paginate(DatabaseName=database['Name'])\n+ for table in iterator.search('TableList[]'):\n+ table_name = '%s.%s' % (database['Name'], table['Name'])\n+ if table_name not in schema:\n+ column = [columns['Name'] for columns in table['StorageDescriptor']['Columns']]\n+ schema[table_name] = {'name': table_name, 'columns': column}\n+ for partition in table.get('PartitionKeys', []):\n+ schema[table_name]['columns'].append(partition['Name'])\n return schema.values()\n \n def get_schema(self, get_stats=False):\n", "issue": "Athena query runner doesn't paginate schemas\n<!--\r\n#####################################################################\r\n#\r\n# Need support? USE THE FORUM! https://discuss.redash.io/c/support.\r\n#\r\n# Don't have steps to reproduce and actually not sure it's a bug?\r\n# Use the forum! https://discuss.redash.io/c/support.\r\n#\r\n#####################################################################\r\n\r\n**Got an idea for a new feature?** Check if it isn't on the roadmap already: http://bit.ly/redash-roadmap and start a new discussion in the features category: https://discuss.redash.io/c/feature-requests \ud83c\udf1f.\r\n\r\nFound a bug? Please fill out the sections below... thank you \ud83d\udc4d\r\n-->\r\n\r\n### Issue Summary\r\nHi,\r\n\r\nAWS Athena query runner paginates only tables but not schemas.\r\nWe have ~170 schemas, however, only the first 100 are shown in schema browser.\r\n \r\n### Steps to Reproduce\r\n\r\n1. Connect to Athena with Glue metastore which has more than 100 schemas\r\n2. 
In the new query window, only 100 schemas are shown.\r\n\r\nI've looked into the code and looks like you paginate only on tables but not on schemas (https://github.com/getredash/redash/blob/v4.0.0/redash/query_runner/athena.py#L130).\r\n### Technical details:\r\n\r\n* Redash Version: 4.0.0\r\n* Browser/OS:\r\n* How did you install Redash: docker\r\n\r\n\r\nKind regards,\r\nIevgen.\n", "before_files": [{"content": "import logging\nimport os\n\nfrom redash.query_runner import *\nfrom redash.settings import parse_boolean\nfrom redash.utils import json_dumps, json_loads\n\nlogger = logging.getLogger(__name__)\nANNOTATE_QUERY = parse_boolean(os.environ.get('ATHENA_ANNOTATE_QUERY', 'true'))\nSHOW_EXTRA_SETTINGS = parse_boolean(os.environ.get('ATHENA_SHOW_EXTRA_SETTINGS', 'true'))\nOPTIONAL_CREDENTIALS = parse_boolean(os.environ.get('ATHENA_OPTIONAL_CREDENTIALS', 'true'))\n\ntry:\n import pyathena\n import boto3\n enabled = True\nexcept ImportError:\n enabled = False\n\n\n_TYPE_MAPPINGS = {\n 'boolean': TYPE_BOOLEAN,\n 'tinyint': TYPE_INTEGER,\n 'smallint': TYPE_INTEGER,\n 'integer': TYPE_INTEGER,\n 'bigint': TYPE_INTEGER,\n 'double': TYPE_FLOAT,\n 'varchar': TYPE_STRING,\n 'timestamp': TYPE_DATETIME,\n 'date': TYPE_DATE,\n 'varbinary': TYPE_STRING,\n 'array': TYPE_STRING,\n 'map': TYPE_STRING,\n 'row': TYPE_STRING,\n 'decimal': TYPE_FLOAT,\n}\n\n\nclass SimpleFormatter(object):\n def format(self, operation, parameters=None):\n return operation\n\n\nclass Athena(BaseQueryRunner):\n noop_query = 'SELECT 1'\n\n @classmethod\n def name(cls):\n return \"Amazon Athena\"\n\n @classmethod\n def configuration_schema(cls):\n schema = {\n 'type': 'object',\n 'properties': {\n 'region': {\n 'type': 'string',\n 'title': 'AWS Region'\n },\n 'aws_access_key': {\n 'type': 'string',\n 'title': 'AWS Access Key'\n },\n 'aws_secret_key': {\n 'type': 'string',\n 'title': 'AWS Secret Key'\n },\n 's3_staging_dir': {\n 'type': 'string',\n 'title': 'S3 Staging (Query Results) Bucket Path'\n },\n 'schema': {\n 'type': 'string',\n 'title': 'Schema Name',\n 'default': 'default'\n },\n 'glue': {\n 'type': 'boolean',\n 'title': 'Use Glue Data Catalog',\n },\n },\n 'required': ['region', 's3_staging_dir'],\n 'order': ['region', 'aws_access_key', 'aws_secret_key', 's3_staging_dir', 'schema'],\n 'secret': ['aws_secret_key']\n }\n\n if SHOW_EXTRA_SETTINGS:\n schema['properties'].update({\n 'encryption_option': {\n 'type': 'string',\n 'title': 'Encryption Option',\n },\n 'kms_key': {\n 'type': 'string',\n 'title': 'KMS Key',\n },\n })\n\n if not OPTIONAL_CREDENTIALS:\n schema['required'] += ['aws_access_key', 'aws_secret_key']\n\n return schema\n\n @classmethod\n def enabled(cls):\n return enabled\n\n @classmethod\n def annotate_query(cls):\n return ANNOTATE_QUERY\n\n @classmethod\n def type(cls):\n return \"athena\"\n\n def __get_schema_from_glue(self):\n client = boto3.client(\n 'glue',\n aws_access_key_id=self.configuration.get('aws_access_key', None),\n aws_secret_access_key=self.configuration.get('aws_secret_key', None),\n region_name=self.configuration['region']\n )\n schema = {}\n paginator = client.get_paginator('get_tables')\n\n for database in client.get_databases()['DatabaseList']:\n iterator = paginator.paginate(DatabaseName=database['Name'])\n for table in iterator.search('TableList[]'):\n table_name = '%s.%s' % (database['Name'], table['Name'])\n if table_name not in schema:\n column = [columns['Name'] for columns in table['StorageDescriptor']['Columns']]\n schema[table_name] = {'name': table_name, 'columns': 
column}\n for partition in table.get('PartitionKeys', []):\n schema[table_name]['columns'].append(partition['Name'])\n\n return schema.values()\n\n def get_schema(self, get_stats=False):\n if self.configuration.get('glue', False):\n return self.__get_schema_from_glue()\n\n schema = {}\n query = \"\"\"\n SELECT table_schema, table_name, column_name\n FROM information_schema.columns\n WHERE table_schema NOT IN ('information_schema')\n \"\"\"\n\n results, error = self.run_query(query, None)\n if error is not None:\n raise Exception(\"Failed getting schema.\")\n\n results = json_loads(results)\n for row in results['rows']:\n table_name = '{0}.{1}'.format(row['table_schema'], row['table_name'])\n if table_name not in schema:\n schema[table_name] = {'name': table_name, 'columns': []}\n schema[table_name]['columns'].append(row['column_name'])\n\n return schema.values()\n\n def run_query(self, query, user):\n cursor = pyathena.connect(\n s3_staging_dir=self.configuration['s3_staging_dir'],\n region_name=self.configuration['region'],\n aws_access_key_id=self.configuration.get('aws_access_key', None),\n aws_secret_access_key=self.configuration.get('aws_secret_key', None),\n schema_name=self.configuration.get('schema', 'default'),\n encryption_option=self.configuration.get('encryption_option', None),\n kms_key=self.configuration.get('kms_key', None),\n formatter=SimpleFormatter()).cursor()\n\n try:\n cursor.execute(query)\n column_tuples = [(i[0], _TYPE_MAPPINGS.get(i[1], None)) for i in cursor.description]\n columns = self.fetch_columns(column_tuples)\n rows = [dict(zip(([c['name'] for c in columns]), r)) for i, r in enumerate(cursor.fetchall())]\n qbytes = None\n athena_query_id = None\n try:\n qbytes = cursor.data_scanned_in_bytes\n except AttributeError as e:\n logger.debug(\"Athena Upstream can't get data_scanned_in_bytes: %s\", e)\n try:\n athena_query_id = cursor.query_id\n except AttributeError as e:\n logger.debug(\"Athena Upstream can't get query_id: %s\", e)\n data = {\n 'columns': columns,\n 'rows': rows,\n 'metadata': {\n 'data_scanned': qbytes,\n 'athena_query_id': athena_query_id\n }\n }\n json_data = json_dumps(data, ignore_nan=True)\n error = None\n except KeyboardInterrupt:\n if cursor.query_id:\n cursor.cancel()\n error = \"Query cancelled by user.\"\n json_data = None\n except Exception as ex:\n if cursor.query_id:\n cursor.cancel()\n error = ex.message\n json_data = None\n\n return json_data, error\n\n\nregister(Athena)\n", "path": "redash/query_runner/athena.py"}]} | 2,882 | 408 |
gh_patches_debug_49038 | rasdani/github-patches | git_diff | facebookresearch__hydra-907 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] Hydra's override grammar allows quoted resolver args, but OmegaConf does not recognize them
# 🐛 Bug
## Description
Best explained with an example (assume a basic `my_app.py` that prints the resolved config):
```bash
python my_app.py +port='${env:PORT}' # works, may crash if PORT is not defined
python my_app.py +port='${env:PORT,80}' # crashes: not recognized by Hydra's grammar
python my_app.py +port='${env:PORT,"80"}' # accepted by Hydra but OmegaConf does not resolve it
```
This is because in the current version of OmegaConf, quotes are not allowed inside interpolations.
## Checklist
- [x] I checked on the latest version of Hydra
- [x] I created a minimal repro
## To reproduce
** Minimal Code/Config snippet to reproduce **
```python
from omegaconf import DictConfig, OmegaConf
import hydra
@hydra.main()
def my_app(cfg: DictConfig) -> None:
print(OmegaConf.to_yaml(cfg, resolve=True))
if __name__ == "__main__":
my_app()
```
Then try the examples above.
** Stack trace/error message **
Just showing the last example:
```
port: ${env:PORT,"80"}
```
## Expected Behavior
Hydra should allow the same characters as OmegaConf.
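For comparison, a minimal sketch of the behaviour when the same value is handed to OmegaConf directly (assuming the OmegaConf 2.0-era built-in `env` resolver, which accepts an optional unquoted default):

```python
import os

from omegaconf import OmegaConf

os.environ.pop("PORT", None)  # ensure the default branch is exercised
cfg = OmegaConf.create({"port": "${env:PORT,80}"})
print(OmegaConf.to_yaml(cfg, resolve=True))  # the port falls back to the default, 80
```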
## System information
- **Hydra Version** : current master (7afee097)
- **Python version** : 3.8.3
- **Virtual environment type and version** : conda 4.8.3
- **Operating system** : MacOS
</issue>
<code>
[start of hydra/core/override_parser/overrides_parser.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import sys
3 from typing import Any, List, Optional
4
5 from antlr4.error.Errors import LexerNoViableAltException, RecognitionException
6
7 from hydra._internal.grammar import grammar_functions
8 from hydra._internal.grammar.functions import Functions
9 from hydra.core.config_loader import ConfigLoader
10 from hydra.core.override_parser.overrides_visitor import (
11 HydraErrorListener,
12 HydraOverrideVisitor,
13 )
14 from hydra.core.override_parser.types import Override
15 from hydra.errors import HydraException, OverrideParseException
16
17 try:
18 from hydra.grammar.gen.OverrideLexer import (
19 CommonTokenStream,
20 InputStream,
21 OverrideLexer,
22 )
23 from hydra.grammar.gen.OverrideParser import OverrideParser
24
25 except ModuleNotFoundError:
26 print(
27 "Error importing generated parsers, run `python setup.py antlr` to regenerate."
28 )
29 sys.exit(1)
30
31 # The set of parser rules that require the lexer to be in lexical mode `KEY`.
32 KEY_RULES = {"key", "override", "package", "packageOrGroup"}
33
34
35 class OverridesParser:
36 functions: Functions
37
38 @classmethod
39 def create(cls, config_loader: Optional[ConfigLoader] = None) -> "OverridesParser":
40 functions = create_functions()
41 return cls(functions=functions, config_loader=config_loader)
42
43 def __init__(
44 self, functions: Functions, config_loader: Optional[ConfigLoader] = None
45 ):
46 self.functions = functions
47 self.config_loader = config_loader
48
49 def parse_rule(self, s: str, rule_name: str) -> Any:
50 error_listener = HydraErrorListener()
51 istream = InputStream(s)
52 lexer = OverrideLexer(istream)
53 lexer.removeErrorListeners()
54 lexer.addErrorListener(error_listener)
55
56 # Set the lexer in the correct mode to parse the desired rule.
57 lexer_mode = "KEY" if rule_name in KEY_RULES else "VALUE"
58 lexer.mode(getattr(OverrideLexer, lexer_mode))
59
60 stream = CommonTokenStream(lexer)
61 parser = OverrideParser(stream)
62 parser.removeErrorListeners()
63 parser.addErrorListener(error_listener)
64 visitor = HydraOverrideVisitor(self.functions)
65 rule = getattr(parser, rule_name)
66 tree = rule()
67 ret = visitor.visit(tree)
68 if isinstance(ret, Override):
69 ret.input_line = s
70 return ret
71
72 def parse_override(self, s: str) -> Override:
73 ret = self.parse_rule(s, "override")
74 assert isinstance(ret, Override)
75 return ret
76
77 def parse_overrides(self, overrides: List[str]) -> List[Override]:
78 ret: List[Override] = []
79 for override in overrides:
80 try:
81 parsed = self.parse_rule(override, "override")
82 except HydraException as e:
83 cause = e.__cause__
84 if isinstance(cause, LexerNoViableAltException):
85 prefix = "LexerNoViableAltException: "
86 start = len(prefix) + cause.startIndex + 1
87 msg = f"{prefix}{override}" f"\n{'^'.rjust(start)}"
88 e.__cause__ = None
89 elif isinstance(cause, RecognitionException):
90 prefix = f"{e}"
91 msg = f"{prefix}"
92 e.__cause__ = None
93 else:
94 msg = f"Error parsing override '{override}'" f"\n{e}"
95 raise OverrideParseException(
96 override=override,
97 message=f"{msg}"
98 f"\nSee https://hydra.cc/docs/next/advanced/override_grammar/basic for details",
99 ) from e.__cause__
100 assert isinstance(parsed, Override)
101 parsed.config_loader = self.config_loader
102 ret.append(parsed)
103 return ret
104
105
106 def create_functions() -> Functions:
107 functions = Functions()
108 # casts
109 functions.register(name="int", func=grammar_functions.cast_int)
110 functions.register(name="str", func=grammar_functions.cast_str)
111 functions.register(name="bool", func=grammar_functions.cast_bool)
112 functions.register(name="float", func=grammar_functions.cast_float)
113 # sweeps
114 functions.register(name="choice", func=grammar_functions.choice)
115 functions.register(name="range", func=grammar_functions.range)
116 functions.register(name="interval", func=grammar_functions.interval)
117 # misc
118 functions.register(name="tag", func=grammar_functions.tag)
119 functions.register(name="sort", func=grammar_functions.sort)
120 functions.register(name="shuffle", func=grammar_functions.shuffle)
121 functions.register(name="glob", func=grammar_functions.glob)
122 return functions
123
[end of hydra/core/override_parser/overrides_parser.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hydra/core/override_parser/overrides_parser.py b/hydra/core/override_parser/overrides_parser.py
--- a/hydra/core/override_parser/overrides_parser.py
+++ b/hydra/core/override_parser/overrides_parser.py
@@ -54,8 +54,8 @@
lexer.addErrorListener(error_listener)
# Set the lexer in the correct mode to parse the desired rule.
- lexer_mode = "KEY" if rule_name in KEY_RULES else "VALUE"
- lexer.mode(getattr(OverrideLexer, lexer_mode))
+ if rule_name not in KEY_RULES:
+ lexer.mode(OverrideLexer.VALUE_MODE)
stream = CommonTokenStream(lexer)
parser = OverrideParser(stream)
| {"golden_diff": "diff --git a/hydra/core/override_parser/overrides_parser.py b/hydra/core/override_parser/overrides_parser.py\n--- a/hydra/core/override_parser/overrides_parser.py\n+++ b/hydra/core/override_parser/overrides_parser.py\n@@ -54,8 +54,8 @@\n lexer.addErrorListener(error_listener)\n \n # Set the lexer in the correct mode to parse the desired rule.\n- lexer_mode = \"KEY\" if rule_name in KEY_RULES else \"VALUE\"\n- lexer.mode(getattr(OverrideLexer, lexer_mode))\n+ if rule_name not in KEY_RULES:\n+ lexer.mode(OverrideLexer.VALUE_MODE)\n \n stream = CommonTokenStream(lexer)\n parser = OverrideParser(stream)\n", "issue": "[Bug] Hydra's override grammar allows quoted resolver args, but OmegaConf does not recognize them\n# \ud83d\udc1b Bug\r\n\r\n## Description\r\n\r\nBest explained with an example (assume a basic `my_app.py` that prints the resolved config):\r\n\r\n```bash\r\npython my_app.py +port='${env:PORT}' # works, may crash if PORT is not defined\r\npython my_app.py +port='${env:PORT,80}' # crashes: not recognized by Hydra's grammar\r\npython my_app.py +port='${env:PORT,\"80\"}' # accepted by Hydra but OmegaConf does not resolve it\r\n```\r\n\r\nThis is because in the current version of OmegaConf, quotes are not allowed inside interpolations.\r\n\r\n## Checklist\r\n- [x] I checked on the latest version of Hydra\r\n- [x] I created a minimal repro\r\n\r\n## To reproduce\r\n\r\n** Minimal Code/Config snippet to reproduce **\r\n\r\n```python\r\nfrom omegaconf import DictConfig, OmegaConf\r\nimport hydra\r\n\r\[email protected]()\r\ndef my_app(cfg: DictConfig) -> None:\r\n print(OmegaConf.to_yaml(cfg, resolve=True))\r\n\r\nif __name__ == \"__main__\":\r\n my_app()\r\n```\r\n\r\nThen try the examples above.\r\n\r\n** Stack trace/error message **\r\n\r\nJust showing the last example:\r\n```\r\nport: ${env:PORT,\"80\"}\r\n```\r\n\r\n## Expected Behavior\r\nHydra should allow the same characters as OmegaConf.\r\n\r\n## System information\r\n- **Hydra Version** : current master (7afee097)\r\n- **Python version** : 3.8.3\r\n- **Virtual environment type and version** : conda 4.8.3\r\n- **Operating system** : MacOS\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport sys\nfrom typing import Any, List, Optional\n\nfrom antlr4.error.Errors import LexerNoViableAltException, RecognitionException\n\nfrom hydra._internal.grammar import grammar_functions\nfrom hydra._internal.grammar.functions import Functions\nfrom hydra.core.config_loader import ConfigLoader\nfrom hydra.core.override_parser.overrides_visitor import (\n HydraErrorListener,\n HydraOverrideVisitor,\n)\nfrom hydra.core.override_parser.types import Override\nfrom hydra.errors import HydraException, OverrideParseException\n\ntry:\n from hydra.grammar.gen.OverrideLexer import (\n CommonTokenStream,\n InputStream,\n OverrideLexer,\n )\n from hydra.grammar.gen.OverrideParser import OverrideParser\n\nexcept ModuleNotFoundError:\n print(\n \"Error importing generated parsers, run `python setup.py antlr` to regenerate.\"\n )\n sys.exit(1)\n\n# The set of parser rules that require the lexer to be in lexical mode `KEY`.\nKEY_RULES = {\"key\", \"override\", \"package\", \"packageOrGroup\"}\n\n\nclass OverridesParser:\n functions: Functions\n\n @classmethod\n def create(cls, config_loader: Optional[ConfigLoader] = None) -> \"OverridesParser\":\n functions = create_functions()\n return cls(functions=functions, config_loader=config_loader)\n\n def __init__(\n self, functions: Functions, config_loader: Optional[ConfigLoader] = None\n ):\n self.functions = functions\n self.config_loader = config_loader\n\n def parse_rule(self, s: str, rule_name: str) -> Any:\n error_listener = HydraErrorListener()\n istream = InputStream(s)\n lexer = OverrideLexer(istream)\n lexer.removeErrorListeners()\n lexer.addErrorListener(error_listener)\n\n # Set the lexer in the correct mode to parse the desired rule.\n lexer_mode = \"KEY\" if rule_name in KEY_RULES else \"VALUE\"\n lexer.mode(getattr(OverrideLexer, lexer_mode))\n\n stream = CommonTokenStream(lexer)\n parser = OverrideParser(stream)\n parser.removeErrorListeners()\n parser.addErrorListener(error_listener)\n visitor = HydraOverrideVisitor(self.functions)\n rule = getattr(parser, rule_name)\n tree = rule()\n ret = visitor.visit(tree)\n if isinstance(ret, Override):\n ret.input_line = s\n return ret\n\n def parse_override(self, s: str) -> Override:\n ret = self.parse_rule(s, \"override\")\n assert isinstance(ret, Override)\n return ret\n\n def parse_overrides(self, overrides: List[str]) -> List[Override]:\n ret: List[Override] = []\n for override in overrides:\n try:\n parsed = self.parse_rule(override, \"override\")\n except HydraException as e:\n cause = e.__cause__\n if isinstance(cause, LexerNoViableAltException):\n prefix = \"LexerNoViableAltException: \"\n start = len(prefix) + cause.startIndex + 1\n msg = f\"{prefix}{override}\" f\"\\n{'^'.rjust(start)}\"\n e.__cause__ = None\n elif isinstance(cause, RecognitionException):\n prefix = f\"{e}\"\n msg = f\"{prefix}\"\n e.__cause__ = None\n else:\n msg = f\"Error parsing override '{override}'\" f\"\\n{e}\"\n raise OverrideParseException(\n override=override,\n message=f\"{msg}\"\n f\"\\nSee https://hydra.cc/docs/next/advanced/override_grammar/basic for details\",\n ) from e.__cause__\n assert isinstance(parsed, Override)\n parsed.config_loader = self.config_loader\n ret.append(parsed)\n return ret\n\n\ndef create_functions() -> Functions:\n functions = Functions()\n # casts\n functions.register(name=\"int\", func=grammar_functions.cast_int)\n functions.register(name=\"str\", func=grammar_functions.cast_str)\n functions.register(name=\"bool\", func=grammar_functions.cast_bool)\n 
functions.register(name=\"float\", func=grammar_functions.cast_float)\n # sweeps\n functions.register(name=\"choice\", func=grammar_functions.choice)\n functions.register(name=\"range\", func=grammar_functions.range)\n functions.register(name=\"interval\", func=grammar_functions.interval)\n # misc\n functions.register(name=\"tag\", func=grammar_functions.tag)\n functions.register(name=\"sort\", func=grammar_functions.sort)\n functions.register(name=\"shuffle\", func=grammar_functions.shuffle)\n functions.register(name=\"glob\", func=grammar_functions.glob)\n return functions\n", "path": "hydra/core/override_parser/overrides_parser.py"}]} | 2,138 | 162 |
gh_patches_debug_28643 | rasdani/github-patches | git_diff | pyjanitor-devs__pyjanitor-550 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[ENH] Decorator utility for handling NAs and errors (for transform_column)
# Brief Description
If, say, `df["x"]` is a string column with NAs, then `df["x"].apply(lambda x: x + "1")` doesn't work because of the NAs. It would be useful to have a decorator such that
```python
df.transform_column("x", skipna(lambda x : x + "1"))
```
would work
# Example API
```python
from janitor.func_utils import skipna, skiperror
df.transform_column("x", skipna(lambda x : x + "1"))
df.transform_column("y", skiperror(func))
@skiperror
def foo(x):
...
pass
```
# Example implementation
```python
import numpy as np


def skipna(f):
    def _wrapped(x):
        if (type(x) is float and np.isnan(x)) or x is None:
            return np.nan
        return f(x)

    return _wrapped


def skiperror(f, return_val=None, return_x=False):
    def _wrapped(x):
        try:
            return f(x)
        except Exception:
            if return_x:
                return x
            return return_val

    return _wrapped
```
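And a quick usage sketch with pandas (column name and data made up), relying on the `skipna` helper defined above:

```python
import pandas as pd

df = pd.DataFrame({"x": ["a", None, "b"]})
df["x_new"] = df["x"].apply(skipna(lambda x: x + "1"))
# -> ["a1", NaN, "b1"] instead of raising on the missing value
```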
</issue>
<code>
[start of janitor/utils.py]
1 """ Miscellaneous internal PyJanitor helper functions. """
2
3 import functools
4 import warnings
5 from typing import Callable, Dict, List, Union
6
7 import pandas as pd
8
9 from .errors import JanitorError
10
11
12 def check(varname: str, value, expected_types: list):
13 """
14 One-liner syntactic sugar for checking types.
15
16 Should be used like this::
17
18 check('x', x, [int, float])
19
20 :param varname: The name of the variable.
21 :param value: The value of the varname.
22 :param expected_types: The types we expect the item to be.
23 :returns: TypeError if data is not the expected type.
24 """
25 is_expected_type = False
26 for t in expected_types:
27 if isinstance(value, t):
28 is_expected_type = True
29 break
30
31 if not is_expected_type:
32 raise TypeError(
33 "{varname} should be one of {expected_types}".format(
34 varname=varname, expected_types=expected_types
35 )
36 )
37
38
39 def _clean_accounting_column(x: str) -> float:
40 """
41 Perform the logic for the `cleaning_style == "accounting"` attribute.
42
43 This is a private function, not intended to be used outside of
44 ``currency_column_to_numeric``.
45
46 It is intended to be used in a pandas `apply` method.
47
48 :returns: An object with a cleaned column.
49 """
50 y = x.strip()
51 y = y.replace(",", "")
52 y = y.replace(")", "")
53 y = y.replace("(", "-")
54 if y == "-":
55 return 0.00
56 return float(y)
57
58
59 def _currency_column_to_numeric(x, cast_non_numeric=None) -> str:
60 """
61 Perform logic for changing cell values.
62
63 This is a private function intended to be used only in
64 ``currency_column_to_numeric``.
65
66 It is intended to be used in a pandas `apply` method, after being passed
67 through `partial`.
68 """
69 acceptable_currency_characters = {
70 "-",
71 ".",
72 "1",
73 "2",
74 "3",
75 "4",
76 "5",
77 "6",
78 "7",
79 "8",
80 "9",
81 "0",
82 }
83 if len(x) == 0:
84 return "ORIGINAL_NA"
85
86 if cast_non_numeric:
87 if x in cast_non_numeric.keys():
88 check(
89 "{%r: %r}" % (x, str(cast_non_numeric[x])),
90 cast_non_numeric[x],
91 [int, float],
92 )
93 return cast_non_numeric[x]
94 else:
95 return "".join(i for i in x if i in acceptable_currency_characters)
96 else:
97 return "".join(i for i in x if i in acceptable_currency_characters)
98
99
100 def _replace_empty_string_with_none(x):
101 if isinstance(x, int):
102 return x
103
104 elif isinstance(x, float):
105 return x
106
107 elif len(x):
108 return x
109
110
111 def _replace_original_empty_string_with_none(x):
112 if x != "ORIGINAL_NA":
113 return x
114
115
116 def _strip_underscores(
117 df: pd.DataFrame, strip_underscores: Union[str, bool] = None
118 ) -> pd.DataFrame:
119 """
120 Strip underscores from DataFrames column names.
121
122 Underscores can be stripped from the beginning, end or both.
123
124 .. code-block:: python
125
126 df = _strip_underscores(df, strip_underscores='left')
127
128 :param df: The pandas DataFrame object.
129 :param strip_underscores: (optional) Removes the outer underscores from all
130 column names. Default None keeps outer underscores. Values can be
131 either 'left', 'right' or 'both' or the respective shorthand 'l', 'r'
132 and True.
133 :returns: A pandas DataFrame with underscores removed.
134 """
135
136 df = df.rename(
137 columns=lambda x: _strip_underscores_func(x, strip_underscores)
138 )
139 return df
140
141
142 def _strip_underscores_func(
143 col: str, strip_underscores: Union[str, bool] = None
144 ) -> pd.DataFrame:
145 """Strip underscores from a string."""
146 underscore_options = [None, "left", "right", "both", "l", "r", True]
147 if strip_underscores not in underscore_options:
148 raise JanitorError(
149 f"strip_underscores must be one of: {underscore_options}"
150 )
151
152 if strip_underscores in ["left", "l"]:
153 col = col.lstrip("_")
154 elif strip_underscores in ["right", "r"]:
155 col = col.rstrip("_")
156 elif strip_underscores == "both" or strip_underscores is True:
157 col = col.strip("_")
158 return col
159
160
161 def import_message(submodule: str, package: str, installation: str):
162 """
163 Generic message for indicating to the user when a function relies on an
164 optional module / package that is not currently installed. Includes
165 installation instructions. Used in `chemistry.py` and `biology.py`.
166
167 :param submodule: pyjanitor submodule that needs an external dependency.
168 :param package: External package this submodule relies on.
169 :param installation: Command to execute in the environment to install
170 the package.
171 """
172
173 print(
174 f"To use the janitor submodule {submodule}, you need to install "
175 f"{package}."
176 )
177 print()
178 print(f"To do so, use the following command:")
179 print()
180 print(f" {installation}")
181
182
183 def idempotent(func: Callable, df: pd.DataFrame, *args, **kwargs):
184 """
185 Raises error if a function operating on a `DataFrame` is not idempotent,
186 that is, `func(func(df)) = func(df)` is not true for all `df`.
187
188 :param func: A python method.
189 :param df: A pandas `DataFrame`.
190 :param args: Positional arguments supplied to the method.
191 :param kwargs: Keyword arguments supplied to the method.
192 :raises ValueError: If `func` is found to not be idempotent for the given
193 `DataFrame` `df`.
194 """
195 if not func(df, *args, **kwargs) == func(
196 func(df, *args, **kwargs), *args, **kwargs
197 ):
198 raise ValueError(
199 "Supplied function is not idempotent for the given " "DataFrame."
200 )
201
202
203 def deprecated_alias(**aliases) -> Callable:
204 """
205 Used as a decorator when deprecating old function argument names, while
206 keeping backwards compatibility.
207
208 Implementation is inspired from `StackOverflow`_.
209
210 .. _StackOverflow: https://stackoverflow.com/questions/49802412/how-to-implement-deprecation-in-python-with-argument-alias # noqa: E501
211
212 Functional usage example:
213
214 .. code-block:: python
215
216 @deprecated_alias(a='alpha', b='beta')
217 def simple_sum(alpha, beta):
218 return alpha + beta
219
220 :param aliases: Dictionary of aliases for a function's arguments.
221 :return: Your original function wrapped with the kwarg redirection
222 function.
223 """
224
225 def decorator(func):
226 @functools.wraps(func)
227 def wrapper(*args, **kwargs):
228 rename_kwargs(func.__name__, kwargs, aliases)
229 return func(*args, **kwargs)
230
231 return wrapper
232
233 return decorator
234
235
236 def rename_kwargs(func_name: str, kwargs: Dict, aliases: Dict):
237 """
238 Used to update deprecated argument names with new names. Throws a
239 TypeError if both arguments are provided, and warns if old alias is used.
240
241 Implementation is inspired from `StackOverflow`_.
242
243 .. _StackOverflow: https://stackoverflow.com/questions/49802412/how-to-implement-deprecation-in-python-with-argument-alias # noqa: E501
244
245 :param func_name: name of decorated function.
246 :param kwargs: Arguments supplied to the method.
247 :param aliases: Dictionary of aliases for a function's arguments.
248 :return: Nothing; the passed `kwargs` are modified directly.
249 """
250 for old_alias, new_alias in aliases.items():
251 if old_alias in kwargs:
252 if new_alias in kwargs:
253 raise TypeError(
254 f"{func_name} received both {old_alias} and {new_alias}"
255 )
256 warnings.warn(
257 f"{old_alias} is deprecated; use {new_alias}",
258 DeprecationWarning,
259 )
260 kwargs[new_alias] = kwargs.pop(old_alias)
261
262
263 def check_column(
264 df: pd.DataFrame, old_column_names: List, present: bool = True
265 ):
266 """
267 One-liner syntactic sugar for checking the presence or absence of a column.
268
269 Should be used like this::
270
271 check(df, ['a', 'b'], present=True)
272
273 :param df: The name of the variable.
274 :param old_column_names: A list of column names we want to check to see if
275 present (or absent) in df.
276 :param present: If True (default), checks to see if all of old_column_names
277 are in df.columns. If False, checks that none of old_column_names are
278 in df.columns.
279 :returns: ValueError if data is not the expected type.
280 """
281 for column_name in old_column_names:
282 if present:
283 if column_name not in df.columns:
284 raise ValueError(
285 f"{column_name} not present in dataframe columns!"
286 )
287 else: # Tests for exclusion
288 if column_name in df.columns:
289 raise ValueError(
290 f"{column_name} already present in dataframe columns!"
291 )
292
[end of janitor/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/janitor/utils.py b/janitor/utils.py
--- a/janitor/utils.py
+++ b/janitor/utils.py
@@ -4,6 +4,7 @@
import warnings
from typing import Callable, Dict, List, Union
+import numpy as np
import pandas as pd
from .errors import JanitorError
@@ -289,3 +290,66 @@
raise ValueError(
f"{column_name} already present in dataframe columns!"
)
+
+
+def skipna(f: Callable) -> Callable:
+ """
+ Decorator for escaping np.nan and None in a function
+
+ Should be used like this::
+
+ df[column].apply(skipna(transform))
+
+ or::
+
+ @skipna
+ def transform(x):
+ pass
+
+ :param f: the function to be wrapped
+ :returns: _wrapped, the wrapped function
+ """
+
+ def _wrapped(x, *args, **kwargs):
+ if (type(x) is float and np.isnan(x)) or x is None:
+ return np.nan
+ else:
+ return f(x, *args, **kwargs)
+
+ return _wrapped
+
+
+def skiperror(
+ f: Callable, return_x: bool = False, return_val=np.nan
+) -> Callable:
+ """
+ Decorator for escaping errors in a function
+
+ Should be used like this::
+
+ df[column].apply(
+ skiperror(transform, return_val=3, return_x=False))
+
+ or::
+
+ @skiperror(return_val=3, return_x=False)
+ def transform(x):
+ pass
+
+ :param f: the function to be wrapped
+ :param return_x: whether or not the original value that caused error
+ should be returned
+ :param return_val: the value to be returned when an error hits.
+ Ignored if return_x is True
+ :returns: _wrapped, the wrapped function
+ """
+
+ def _wrapped(x, *args, **kwargs):
+ try:
+ return f(x, *args, **kwargs)
+ except Exception:
+ if return_x:
+ return x
+ return return_val
+
+ return _wrapped
| {"golden_diff": "diff --git a/janitor/utils.py b/janitor/utils.py\n--- a/janitor/utils.py\n+++ b/janitor/utils.py\n@@ -4,6 +4,7 @@\n import warnings\n from typing import Callable, Dict, List, Union\n \n+import numpy as np\n import pandas as pd\n \n from .errors import JanitorError\n@@ -289,3 +290,66 @@\n raise ValueError(\n f\"{column_name} already present in dataframe columns!\"\n )\n+\n+\n+def skipna(f: Callable) -> Callable:\n+ \"\"\"\n+ Decorator for escaping np.nan and None in a function\n+\n+ Should be used like this::\n+\n+ df[column].apply(skipna(transform))\n+\n+ or::\n+\n+ @skipna\n+ def transform(x):\n+ pass\n+\n+ :param f: the function to be wrapped\n+ :returns: _wrapped, the wrapped function\n+ \"\"\"\n+\n+ def _wrapped(x, *args, **kwargs):\n+ if (type(x) is float and np.isnan(x)) or x is None:\n+ return np.nan\n+ else:\n+ return f(x, *args, **kwargs)\n+\n+ return _wrapped\n+\n+\n+def skiperror(\n+ f: Callable, return_x: bool = False, return_val=np.nan\n+) -> Callable:\n+ \"\"\"\n+ Decorator for escaping errors in a function\n+\n+ Should be used like this::\n+\n+ df[column].apply(\n+ skiperror(transform, return_val=3, return_x=False))\n+\n+ or::\n+\n+ @skiperror(return_val=3, return_x=False)\n+ def transform(x):\n+ pass\n+\n+ :param f: the function to be wrapped\n+ :param return_x: whether or not the original value that caused error\n+ should be returned\n+ :param return_val: the value to be returned when an error hits.\n+ Ignored if return_x is True\n+ :returns: _wrapped, the wrapped function\n+ \"\"\"\n+\n+ def _wrapped(x, *args, **kwargs):\n+ try:\n+ return f(x, *args, **kwargs)\n+ except Exception:\n+ if return_x:\n+ return x\n+ return return_val\n+\n+ return _wrapped\n", "issue": "[ENH] Decorator utility for handling NAs and errors (for transform_column)\n# Brief Description\r\n\r\nIf say `df[\"x\"]` is a string column with NAs, then the routine `df[\"x\"].apply(lambda x : x+\"1\")` doesn't work due to the NAs. It would be useful to have a decorator such that \r\n```python\r\ndf.transform_column(\"x\", skipna(lambda x : x + \"1\"))\r\n``` \r\nwould work\r\n\r\n# Example API\r\n\r\n```python\r\nfrom janitor.func_utils import skipna, skiperror\r\n\r\ndf.transform_column(\"x\", skipna(lambda x : x + \"1\"))\r\ndf.transform_column(\"y\", skiperror(func))\r\n\r\n@skiperror\r\ndef foo(x):\r\n ...\r\n pass\r\n```\r\n\r\n# Example implementation\r\n\r\n```python\r\n\r\ndef skipna(f):\r\n def _wrapped(x):\r\n if type(x) is float and np.isnan(x) or x is None:\r\n return np.nan\r\n else:\r\n return f(x)\r\n\r\n return _wrapped\r\n\r\n\r\ndef skiperror(f, return_val=None, return_x=False):\r\n def _wrapped(x):\r\n try:\r\n return f(x)\r\n except:\r\n if return_x:\r\n return x\r\n return return_val\r\n\r\n return _wrapped\r\n```\r\n\n", "before_files": [{"content": "\"\"\" Miscellaneous internal PyJanitor helper functions. 
\"\"\"\n\nimport functools\nimport warnings\nfrom typing import Callable, Dict, List, Union\n\nimport pandas as pd\n\nfrom .errors import JanitorError\n\n\ndef check(varname: str, value, expected_types: list):\n \"\"\"\n One-liner syntactic sugar for checking types.\n\n Should be used like this::\n\n check('x', x, [int, float])\n\n :param varname: The name of the variable.\n :param value: The value of the varname.\n :param expected_types: The types we expect the item to be.\n :returns: TypeError if data is not the expected type.\n \"\"\"\n is_expected_type = False\n for t in expected_types:\n if isinstance(value, t):\n is_expected_type = True\n break\n\n if not is_expected_type:\n raise TypeError(\n \"{varname} should be one of {expected_types}\".format(\n varname=varname, expected_types=expected_types\n )\n )\n\n\ndef _clean_accounting_column(x: str) -> float:\n \"\"\"\n Perform the logic for the `cleaning_style == \"accounting\"` attribute.\n\n This is a private function, not intended to be used outside of\n ``currency_column_to_numeric``.\n\n It is intended to be used in a pandas `apply` method.\n\n :returns: An object with a cleaned column.\n \"\"\"\n y = x.strip()\n y = y.replace(\",\", \"\")\n y = y.replace(\")\", \"\")\n y = y.replace(\"(\", \"-\")\n if y == \"-\":\n return 0.00\n return float(y)\n\n\ndef _currency_column_to_numeric(x, cast_non_numeric=None) -> str:\n \"\"\"\n Perform logic for changing cell values.\n\n This is a private function intended to be used only in\n ``currency_column_to_numeric``.\n\n It is intended to be used in a pandas `apply` method, after being passed\n through `partial`.\n \"\"\"\n acceptable_currency_characters = {\n \"-\",\n \".\",\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n \"6\",\n \"7\",\n \"8\",\n \"9\",\n \"0\",\n }\n if len(x) == 0:\n return \"ORIGINAL_NA\"\n\n if cast_non_numeric:\n if x in cast_non_numeric.keys():\n check(\n \"{%r: %r}\" % (x, str(cast_non_numeric[x])),\n cast_non_numeric[x],\n [int, float],\n )\n return cast_non_numeric[x]\n else:\n return \"\".join(i for i in x if i in acceptable_currency_characters)\n else:\n return \"\".join(i for i in x if i in acceptable_currency_characters)\n\n\ndef _replace_empty_string_with_none(x):\n if isinstance(x, int):\n return x\n\n elif isinstance(x, float):\n return x\n\n elif len(x):\n return x\n\n\ndef _replace_original_empty_string_with_none(x):\n if x != \"ORIGINAL_NA\":\n return x\n\n\ndef _strip_underscores(\n df: pd.DataFrame, strip_underscores: Union[str, bool] = None\n) -> pd.DataFrame:\n \"\"\"\n Strip underscores from DataFrames column names.\n\n Underscores can be stripped from the beginning, end or both.\n\n .. code-block:: python\n\n df = _strip_underscores(df, strip_underscores='left')\n\n :param df: The pandas DataFrame object.\n :param strip_underscores: (optional) Removes the outer underscores from all\n column names. Default None keeps outer underscores. 
Values can be\n either 'left', 'right' or 'both' or the respective shorthand 'l', 'r'\n and True.\n :returns: A pandas DataFrame with underscores removed.\n \"\"\"\n\n df = df.rename(\n columns=lambda x: _strip_underscores_func(x, strip_underscores)\n )\n return df\n\n\ndef _strip_underscores_func(\n col: str, strip_underscores: Union[str, bool] = None\n) -> pd.DataFrame:\n \"\"\"Strip underscores from a string.\"\"\"\n underscore_options = [None, \"left\", \"right\", \"both\", \"l\", \"r\", True]\n if strip_underscores not in underscore_options:\n raise JanitorError(\n f\"strip_underscores must be one of: {underscore_options}\"\n )\n\n if strip_underscores in [\"left\", \"l\"]:\n col = col.lstrip(\"_\")\n elif strip_underscores in [\"right\", \"r\"]:\n col = col.rstrip(\"_\")\n elif strip_underscores == \"both\" or strip_underscores is True:\n col = col.strip(\"_\")\n return col\n\n\ndef import_message(submodule: str, package: str, installation: str):\n \"\"\"\n Generic message for indicating to the user when a function relies on an\n optional module / package that is not currently installed. Includes\n installation instructions. Used in `chemistry.py` and `biology.py`.\n\n :param submodule: pyjanitor submodule that needs an external dependency.\n :param package: External package this submodule relies on.\n :param installation: Command to execute in the environment to install\n the package.\n \"\"\"\n\n print(\n f\"To use the janitor submodule {submodule}, you need to install \"\n f\"{package}.\"\n )\n print()\n print(f\"To do so, use the following command:\")\n print()\n print(f\" {installation}\")\n\n\ndef idempotent(func: Callable, df: pd.DataFrame, *args, **kwargs):\n \"\"\"\n Raises error if a function operating on a `DataFrame` is not idempotent,\n that is, `func(func(df)) = func(df)` is not true for all `df`.\n\n :param func: A python method.\n :param df: A pandas `DataFrame`.\n :param args: Positional arguments supplied to the method.\n :param kwargs: Keyword arguments supplied to the method.\n :raises ValueError: If `func` is found to not be idempotent for the given\n `DataFrame` `df`.\n \"\"\"\n if not func(df, *args, **kwargs) == func(\n func(df, *args, **kwargs), *args, **kwargs\n ):\n raise ValueError(\n \"Supplied function is not idempotent for the given \" \"DataFrame.\"\n )\n\n\ndef deprecated_alias(**aliases) -> Callable:\n \"\"\"\n Used as a decorator when deprecating old function argument names, while\n keeping backwards compatibility.\n\n Implementation is inspired from `StackOverflow`_.\n\n .. _StackOverflow: https://stackoverflow.com/questions/49802412/how-to-implement-deprecation-in-python-with-argument-alias # noqa: E501\n\n Functional usage example:\n\n .. code-block:: python\n\n @deprecated_alias(a='alpha', b='beta')\n def simple_sum(alpha, beta):\n return alpha + beta\n\n :param aliases: Dictionary of aliases for a function's arguments.\n :return: Your original function wrapped with the kwarg redirection\n function.\n \"\"\"\n\n def decorator(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n rename_kwargs(func.__name__, kwargs, aliases)\n return func(*args, **kwargs)\n\n return wrapper\n\n return decorator\n\n\ndef rename_kwargs(func_name: str, kwargs: Dict, aliases: Dict):\n \"\"\"\n Used to update deprecated argument names with new names. Throws a\n TypeError if both arguments are provided, and warns if old alias is used.\n\n Implementation is inspired from `StackOverflow`_.\n\n .. 
_StackOverflow: https://stackoverflow.com/questions/49802412/how-to-implement-deprecation-in-python-with-argument-alias # noqa: E501\n\n :param func_name: name of decorated function.\n :param kwargs: Arguments supplied to the method.\n :param aliases: Dictionary of aliases for a function's arguments.\n :return: Nothing; the passed `kwargs` are modified directly.\n \"\"\"\n for old_alias, new_alias in aliases.items():\n if old_alias in kwargs:\n if new_alias in kwargs:\n raise TypeError(\n f\"{func_name} received both {old_alias} and {new_alias}\"\n )\n warnings.warn(\n f\"{old_alias} is deprecated; use {new_alias}\",\n DeprecationWarning,\n )\n kwargs[new_alias] = kwargs.pop(old_alias)\n\n\ndef check_column(\n df: pd.DataFrame, old_column_names: List, present: bool = True\n):\n \"\"\"\n One-liner syntactic sugar for checking the presence or absence of a column.\n\n Should be used like this::\n\n check(df, ['a', 'b'], present=True)\n\n :param df: The name of the variable.\n :param old_column_names: A list of column names we want to check to see if\n present (or absent) in df.\n :param present: If True (default), checks to see if all of old_column_names\n are in df.columns. If False, checks that none of old_column_names are\n in df.columns.\n :returns: ValueError if data is not the expected type.\n \"\"\"\n for column_name in old_column_names:\n if present:\n if column_name not in df.columns:\n raise ValueError(\n f\"{column_name} not present in dataframe columns!\"\n )\n else: # Tests for exclusion\n if column_name in df.columns:\n raise ValueError(\n f\"{column_name} already present in dataframe columns!\"\n )\n", "path": "janitor/utils.py"}]} | 3,669 | 512 |
gh_patches_debug_18493 | rasdani/github-patches | git_diff | getsentry__sentry-python-1868 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"ImportError: cannot import name 'StarletteIntegration' from 'sentry_sdk.integrations.starlette'" after sentry version update
### How do you use Sentry?
Sentry Saas (sentry.io)
### Version
1.9.7
### Steps to Reproduce
It only happens every once in a while so it is not easy to reproduce.
Here is my `sentry.py` (see exception trace below):
```
import functools
import inspect
import os
from typing import Callable
import sentry_sdk
from archimedes_data_fetcher.config import config
def initialize():
"""Initializes sentry"""
sentry_sdk.init(
config.get("SENTRY_DSN"),
environment=config.get("ENVIRONMENT"),
)
...
...
...
```
Here is the output of `pip list` on the failing container:
```
Package Version
----------------------- ---------
affine 2.3.1
archimedes-data-fetcher 0.5.19
archimedes-flow-utils 1.0.2
attrs 22.1.0
azure-core 1.25.1
azure-storage-blob 12.13.1
cachetools 5.2.0
certifi 2022.6.15
cffi 1.15.1
cftime 1.6.1
charset-normalizer 2.1.1
click 8.1.3
click-plugins 1.1.1
cligj 0.7.2
cloudpickle 2.1.0
croniter 1.3.5
cryptography 37.0.4
dask 2022.8.1
datadog 0.44.0
datadog-api-client 2.3.0
distributed 2022.8.1
docker 6.0.0
fsspec 2022.8.2
gitdb 4.0.9
GitPython 3.1.27
HeapDict 1.0.1
idna 3.3
importlib-resources 5.9.0
investpy 1.0.8
isodate 0.6.1
Jinja2 3.1.2
locket 1.0.0
lxml 4.9.1
MarkupSafe 2.1.1
marshmallow 3.17.1
marshmallow-oneofschema 3.0.1
minio 7.1.11
msgpack 1.0.4
msrest 0.7.1
mypy-extensions 0.4.3
netCDF4 1.6.0
numpy 1.23.2
oauthlib 3.2.0
packaging 21.3
pandas 1.4.4
partd 1.3.0
pendulum 2.1.2
pip 22.2.2
prefect 1.3.0
psutil 5.9.1
psycopg 3.1
pycparser 2.21
pyparsing 3.0.9
pyproj 3.3.1
python-box 6.0.2
python-dateutil 2.8.2
python-dotenv 0.20.0
python-slugify 6.1.2
pytz 2022.2.1
pytzdata 2020.1
PyYAML 6.0
rasterio 1.3.2
requests 2.28.1
requests-oauthlib 1.3.1
rioxarray 0.10.3
sentry-sdk 1.9.7
setuptools 65.3.0
six 1.16.0
smmap 5.0.0
snuggs 1.4.7
sortedcontainers 2.4.0
tabulate 0.8.10
tblib 1.7.0
text-unidecode 1.3
toml 0.10.2
toolz 0.12.0
tornado 6.1
typing_extensions 4.3.0
Unidecode 1.3.4
urllib3 1.26.12
websocket-client 1.4.0
wheel 0.37.1
xarray 2022.6.0
zict 2.2.0
zipp 3.8.1
```
This has not happened when I pin `sentry-sdk` version to `1.9.5`, so I think it must have been introduced in either `1.9.6` or `1.9.7`.
### Expected Result
No crash on sentry initialization.
### Actual Result
```
Task 'store[1]': Exception encountered during task execution!
Traceback (most recent call last):
File "/app/.venv/lib/python3.9/site-packages/prefect/engine/task_runner.py", line 880, in get_task_run_state
value = prefect.utilities.executors.run_task_with_timeout(
File "/app/.venv/lib/python3.9/site-packages/prefect/utilities/executors.py", line 468, in run_task_with_timeout
return task.run(*args, **kwargs) # type: ignore
File "/home/runner/work/archimedes-data-fetcher/archimedes-data-fetcher/archimedes_data_fetcher/sentry.py", line 36, in wrapper
File "/app/archimedes_data_fetcher/sentry.py", line 13, in initialize
sentry_sdk.init(
File "/app/.venv/lib/python3.9/site-packages/sentry_sdk/hub.py", line 119, in _init
client = Client(*args, **kwargs) # type: ignore
File "/app/.venv/lib/python3.9/site-packages/sentry_sdk/client.py", line 89, in __init__
self._init_impl()
File "/app/.venv/lib/python3.9/site-packages/sentry_sdk/client.py", line 123, in _init_impl
self.integrations = setup_integrations(
File "/app/.venv/lib/python3.9/site-packages/sentry_sdk/integrations/__init__.py", line 109, in setup_integrations
for integration_cls in iter_default_integrations(
File "/app/.venv/lib/python3.9/site-packages/sentry_sdk/integrations/__init__.py", line 41, in iter_default_integrations
yield getattr(import_module(module), cls)
File "/usr/local/lib/python3.9/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 850, in exec_module
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "/app/.venv/lib/python3.9/site-packages/sentry_sdk/integrations/fastapi.py", line 4, in <module>
from sentry_sdk.integrations.starlette import (
ImportError: cannot import name 'StarletteIntegration' from 'sentry_sdk.integrations.starlette' (/app/.venv/lib/python3.9/site-packages/sentry_sdk/integrations/starlette.py)
```
</issue>
<code>
[start of sentry_sdk/integrations/fastapi.py]
1 import asyncio
2
3 from sentry_sdk._types import MYPY
4 from sentry_sdk.hub import Hub, _should_send_default_pii
5 from sentry_sdk.integrations import DidNotEnable
6 from sentry_sdk.integrations.starlette import (
7 StarletteIntegration,
8 StarletteRequestExtractor,
9 )
10 from sentry_sdk.tracing import SOURCE_FOR_STYLE, TRANSACTION_SOURCE_ROUTE
11 from sentry_sdk.utils import transaction_from_function
12
13 if MYPY:
14 from typing import Any, Callable, Dict
15
16 from sentry_sdk.scope import Scope
17
18 try:
19 import fastapi # type: ignore
20 except ImportError:
21 raise DidNotEnable("FastAPI is not installed")
22
23
24 _DEFAULT_TRANSACTION_NAME = "generic FastAPI request"
25
26
27 class FastApiIntegration(StarletteIntegration):
28 identifier = "fastapi"
29
30 @staticmethod
31 def setup_once():
32 # type: () -> None
33 patch_get_request_handler()
34
35
36 def _set_transaction_name_and_source(scope, transaction_style, request):
37 # type: (Scope, str, Any) -> None
38 name = ""
39
40 if transaction_style == "endpoint":
41 endpoint = request.scope.get("endpoint")
42 if endpoint:
43 name = transaction_from_function(endpoint) or ""
44
45 elif transaction_style == "url":
46 route = request.scope.get("route")
47 if route:
48 path = getattr(route, "path", None)
49 if path is not None:
50 name = path
51
52 if not name:
53 name = _DEFAULT_TRANSACTION_NAME
54 source = TRANSACTION_SOURCE_ROUTE
55 else:
56 source = SOURCE_FOR_STYLE[transaction_style]
57
58 scope.set_transaction_name(name, source=source)
59
60
61 def patch_get_request_handler():
62 # type: () -> None
63 old_get_request_handler = fastapi.routing.get_request_handler
64
65 def _sentry_get_request_handler(*args, **kwargs):
66 # type: (*Any, **Any) -> Any
67 dependant = kwargs.get("dependant")
68 if (
69 dependant
70 and dependant.call is not None
71 and not asyncio.iscoroutinefunction(dependant.call)
72 ):
73 old_call = dependant.call
74
75 def _sentry_call(*args, **kwargs):
76 # type: (*Any, **Any) -> Any
77 hub = Hub.current
78 with hub.configure_scope() as sentry_scope:
79 if sentry_scope.profile is not None:
80 sentry_scope.profile.update_active_thread_id()
81 return old_call(*args, **kwargs)
82
83 dependant.call = _sentry_call
84
85 old_app = old_get_request_handler(*args, **kwargs)
86
87 async def _sentry_app(*args, **kwargs):
88 # type: (*Any, **Any) -> Any
89 hub = Hub.current
90 integration = hub.get_integration(FastApiIntegration)
91 if integration is None:
92 return await old_app(*args, **kwargs)
93
94 with hub.configure_scope() as sentry_scope:
95 request = args[0]
96
97 _set_transaction_name_and_source(
98 sentry_scope, integration.transaction_style, request
99 )
100
101 extractor = StarletteRequestExtractor(request)
102 info = await extractor.extract_request_info()
103
104 def _make_request_event_processor(req, integration):
105 # type: (Any, Any) -> Callable[[Dict[str, Any], Dict[str, Any]], Dict[str, Any]]
106 def event_processor(event, hint):
107 # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
108
109 # Extract information from request
110 request_info = event.get("request", {})
111 if info:
112 if "cookies" in info and _should_send_default_pii():
113 request_info["cookies"] = info["cookies"]
114 if "data" in info:
115 request_info["data"] = info["data"]
116 event["request"] = request_info
117
118 return event
119
120 return event_processor
121
122 sentry_scope._name = FastApiIntegration.identifier
123 sentry_scope.add_event_processor(
124 _make_request_event_processor(request, integration)
125 )
126
127 return await old_app(*args, **kwargs)
128
129 return _sentry_app
130
131 fastapi.routing.get_request_handler = _sentry_get_request_handler
132
[end of sentry_sdk/integrations/fastapi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sentry_sdk/integrations/fastapi.py b/sentry_sdk/integrations/fastapi.py
--- a/sentry_sdk/integrations/fastapi.py
+++ b/sentry_sdk/integrations/fastapi.py
@@ -3,18 +3,21 @@
from sentry_sdk._types import MYPY
from sentry_sdk.hub import Hub, _should_send_default_pii
from sentry_sdk.integrations import DidNotEnable
-from sentry_sdk.integrations.starlette import (
- StarletteIntegration,
- StarletteRequestExtractor,
-)
from sentry_sdk.tracing import SOURCE_FOR_STYLE, TRANSACTION_SOURCE_ROUTE
from sentry_sdk.utils import transaction_from_function
if MYPY:
from typing import Any, Callable, Dict
-
from sentry_sdk.scope import Scope
+try:
+ from sentry_sdk.integrations.starlette import (
+ StarletteIntegration,
+ StarletteRequestExtractor,
+ )
+except DidNotEnable:
+ raise DidNotEnable("Starlette is not installed")
+
try:
import fastapi # type: ignore
except ImportError:
| {"golden_diff": "diff --git a/sentry_sdk/integrations/fastapi.py b/sentry_sdk/integrations/fastapi.py\n--- a/sentry_sdk/integrations/fastapi.py\n+++ b/sentry_sdk/integrations/fastapi.py\n@@ -3,18 +3,21 @@\n from sentry_sdk._types import MYPY\n from sentry_sdk.hub import Hub, _should_send_default_pii\n from sentry_sdk.integrations import DidNotEnable\n-from sentry_sdk.integrations.starlette import (\n- StarletteIntegration,\n- StarletteRequestExtractor,\n-)\n from sentry_sdk.tracing import SOURCE_FOR_STYLE, TRANSACTION_SOURCE_ROUTE\n from sentry_sdk.utils import transaction_from_function\n \n if MYPY:\n from typing import Any, Callable, Dict\n-\n from sentry_sdk.scope import Scope\n \n+try:\n+ from sentry_sdk.integrations.starlette import (\n+ StarletteIntegration,\n+ StarletteRequestExtractor,\n+ )\n+except DidNotEnable:\n+ raise DidNotEnable(\"Starlette is not installed\")\n+\n try:\n import fastapi # type: ignore\n except ImportError:\n", "issue": "\"ImportError: cannot import name 'StarletteIntegration' from 'sentry_sdk.integrations.starlette'\" after sentry version update\n### How do you use Sentry?\r\n\r\nSentry Saas (sentry.io)\r\n\r\n### Version\r\n\r\n1.9.7\r\n\r\n### Steps to Reproduce\r\n\r\nIt only happens every once in a while so it is not easy to reproduce.\r\n\r\nHere is my `sentry.py` (see exception trace below):\r\n\r\n```\r\nimport functools\r\nimport inspect\r\nimport os\r\nfrom typing import Callable\r\n\r\nimport sentry_sdk\r\n\r\nfrom archimedes_data_fetcher.config import config\r\n\r\n\r\ndef initialize():\r\n \"\"\"Initializes sentry\"\"\"\r\n sentry_sdk.init(\r\n config.get(\"SENTRY_DSN\"),\r\n environment=config.get(\"ENVIRONMENT\"),\r\n )\r\n...\r\n...\r\n...\r\n```\r\n\r\nHere is the output of `pip list` on the failing container:\r\n\r\n```\r\nPackage Version\r\n----------------------- ---------\r\naffine 2.3.1\r\narchimedes-data-fetcher 0.5.19\r\narchimedes-flow-utils 1.0.2\r\nattrs 22.1.0\r\nazure-core 1.25.1\r\nazure-storage-blob 12.13.1\r\ncachetools 5.2.0\r\ncertifi 2022.6.15\r\ncffi 1.15.1\r\ncftime 1.6.1\r\ncharset-normalizer 2.1.1\r\nclick 8.1.3\r\nclick-plugins 1.1.1\r\ncligj 0.7.2\r\ncloudpickle 2.1.0\r\ncroniter 1.3.5\r\ncryptography 37.0.4\r\ndask 2022.8.1\r\ndatadog 0.44.0\r\ndatadog-api-client 2.3.0\r\ndistributed 2022.8.1\r\ndocker 6.0.0\r\nfsspec 2022.8.2\r\ngitdb 4.0.9\r\nGitPython 3.1.27\r\nHeapDict 1.0.1\r\nidna 3.3\r\nimportlib-resources 5.9.0\r\ninvestpy 1.0.8\r\nisodate 0.6.1\r\nJinja2 3.1.2\r\nlocket 1.0.0\r\nlxml 4.9.1\r\nMarkupSafe 2.1.1\r\nmarshmallow 3.17.1\r\nmarshmallow-oneofschema 3.0.1\r\nminio 7.1.11\r\nmsgpack 1.0.4\r\nmsrest 0.7.1\r\nmypy-extensions 0.4.3\r\nnetCDF4 1.6.0\r\nnumpy 1.23.2\r\noauthlib 3.2.0\r\npackaging 21.3\r\npandas 1.4.4\r\npartd 1.3.0\r\npendulum 2.1.2\r\npip 22.2.2\r\nprefect 1.3.0\r\npsutil 5.9.1\r\npsycopg 3.1\r\npycparser 2.21\r\npyparsing 3.0.9\r\npyproj 3.3.1\r\npython-box 6.0.2\r\npython-dateutil 2.8.2\r\npython-dotenv 0.20.0\r\npython-slugify 6.1.2\r\npytz 2022.2.1\r\npytzdata 2020.1\r\nPyYAML 6.0\r\nrasterio 1.3.2\r\nrequests 2.28.1\r\nrequests-oauthlib 1.3.1\r\nrioxarray 0.10.3\r\nsentry-sdk 1.9.7\r\nsetuptools 65.3.0\r\nsix 1.16.0\r\nsmmap 5.0.0\r\nsnuggs 1.4.7\r\nsortedcontainers 2.4.0\r\ntabulate 0.8.10\r\ntblib 1.7.0\r\ntext-unidecode 1.3\r\ntoml 0.10.2\r\ntoolz 0.12.0\r\ntornado 6.1\r\ntyping_extensions 4.3.0\r\nUnidecode 1.3.4\r\nurllib3 1.26.12\r\nwebsocket-client 1.4.0\r\nwheel 0.37.1\r\nxarray 2022.6.0\r\nzict 2.2.0\r\nzipp 3.8.1\r\n```\r\n\r\nThis has not happened when I pin `sentry-sdk` 
version to `1.9.5`, so I think it must have been introduced in either `1.9.6` or `1.9.7`.\r\n\r\n### Expected Result\r\n\r\nNo crash on sentry initialization.\r\n\r\n### Actual Result\r\n\r\n```\r\nTask 'store[1]': Exception encountered during task execution!\r\nTraceback (most recent call last):\r\n File \"/app/.venv/lib/python3.9/site-packages/prefect/engine/task_runner.py\", line 880, in get_task_run_state\r\n value = prefect.utilities.executors.run_task_with_timeout(\r\n File \"/app/.venv/lib/python3.9/site-packages/prefect/utilities/executors.py\", line 468, in run_task_with_timeout\r\n return task.run(*args, **kwargs) # type: ignore\r\n File \"/home/runner/work/archimedes-data-fetcher/archimedes-data-fetcher/archimedes_data_fetcher/sentry.py\", line 36, in wrapper\r\n File \"/app/archimedes_data_fetcher/sentry.py\", line 13, in initialize\r\n sentry_sdk.init(\r\n File \"/app/.venv/lib/python3.9/site-packages/sentry_sdk/hub.py\", line 119, in _init\r\n client = Client(*args, **kwargs) # type: ignore\r\n File \"/app/.venv/lib/python3.9/site-packages/sentry_sdk/client.py\", line 89, in __init__\r\n self._init_impl()\r\n File \"/app/.venv/lib/python3.9/site-packages/sentry_sdk/client.py\", line 123, in _init_impl\r\n self.integrations = setup_integrations(\r\n File \"/app/.venv/lib/python3.9/site-packages/sentry_sdk/integrations/__init__.py\", line 109, in setup_integrations\r\n for integration_cls in iter_default_integrations(\r\n File \"/app/.venv/lib/python3.9/site-packages/sentry_sdk/integrations/__init__.py\", line 41, in iter_default_integrations\r\n yield getattr(import_module(module), cls)\r\n File \"/usr/local/lib/python3.9/importlib/__init__.py\", line 127, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 1030, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 1007, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 986, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 680, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 850, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 228, in _call_with_frames_removed\r\n File \"/app/.venv/lib/python3.9/site-packages/sentry_sdk/integrations/fastapi.py\", line 4, in <module>\r\n from sentry_sdk.integrations.starlette import (\r\nImportError: cannot import name 'StarletteIntegration' from 'sentry_sdk.integrations.starlette' (/app/.venv/lib/python3.9/site-packages/sentry_sdk/integrations/starlette.py)\r\n```\r\n\n", "before_files": [{"content": "import asyncio\n\nfrom sentry_sdk._types import MYPY\nfrom sentry_sdk.hub import Hub, _should_send_default_pii\nfrom sentry_sdk.integrations import DidNotEnable\nfrom sentry_sdk.integrations.starlette import (\n StarletteIntegration,\n StarletteRequestExtractor,\n)\nfrom sentry_sdk.tracing import SOURCE_FOR_STYLE, TRANSACTION_SOURCE_ROUTE\nfrom sentry_sdk.utils import transaction_from_function\n\nif MYPY:\n from typing import Any, Callable, Dict\n\n from sentry_sdk.scope import Scope\n\ntry:\n import fastapi # type: ignore\nexcept ImportError:\n raise DidNotEnable(\"FastAPI is not installed\")\n\n\n_DEFAULT_TRANSACTION_NAME = \"generic FastAPI request\"\n\n\nclass FastApiIntegration(StarletteIntegration):\n identifier = \"fastapi\"\n\n @staticmethod\n def setup_once():\n # type: () -> None\n patch_get_request_handler()\n\n\ndef _set_transaction_name_and_source(scope, transaction_style, request):\n # type: (Scope, str, Any) 
-> None\n name = \"\"\n\n if transaction_style == \"endpoint\":\n endpoint = request.scope.get(\"endpoint\")\n if endpoint:\n name = transaction_from_function(endpoint) or \"\"\n\n elif transaction_style == \"url\":\n route = request.scope.get(\"route\")\n if route:\n path = getattr(route, \"path\", None)\n if path is not None:\n name = path\n\n if not name:\n name = _DEFAULT_TRANSACTION_NAME\n source = TRANSACTION_SOURCE_ROUTE\n else:\n source = SOURCE_FOR_STYLE[transaction_style]\n\n scope.set_transaction_name(name, source=source)\n\n\ndef patch_get_request_handler():\n # type: () -> None\n old_get_request_handler = fastapi.routing.get_request_handler\n\n def _sentry_get_request_handler(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n dependant = kwargs.get(\"dependant\")\n if (\n dependant\n and dependant.call is not None\n and not asyncio.iscoroutinefunction(dependant.call)\n ):\n old_call = dependant.call\n\n def _sentry_call(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n hub = Hub.current\n with hub.configure_scope() as sentry_scope:\n if sentry_scope.profile is not None:\n sentry_scope.profile.update_active_thread_id()\n return old_call(*args, **kwargs)\n\n dependant.call = _sentry_call\n\n old_app = old_get_request_handler(*args, **kwargs)\n\n async def _sentry_app(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(FastApiIntegration)\n if integration is None:\n return await old_app(*args, **kwargs)\n\n with hub.configure_scope() as sentry_scope:\n request = args[0]\n\n _set_transaction_name_and_source(\n sentry_scope, integration.transaction_style, request\n )\n\n extractor = StarletteRequestExtractor(request)\n info = await extractor.extract_request_info()\n\n def _make_request_event_processor(req, integration):\n # type: (Any, Any) -> Callable[[Dict[str, Any], Dict[str, Any]], Dict[str, Any]]\n def event_processor(event, hint):\n # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]\n\n # Extract information from request\n request_info = event.get(\"request\", {})\n if info:\n if \"cookies\" in info and _should_send_default_pii():\n request_info[\"cookies\"] = info[\"cookies\"]\n if \"data\" in info:\n request_info[\"data\"] = info[\"data\"]\n event[\"request\"] = request_info\n\n return event\n\n return event_processor\n\n sentry_scope._name = FastApiIntegration.identifier\n sentry_scope.add_event_processor(\n _make_request_event_processor(request, integration)\n )\n\n return await old_app(*args, **kwargs)\n\n return _sentry_app\n\n fastapi.routing.get_request_handler = _sentry_get_request_handler\n", "path": "sentry_sdk/integrations/fastapi.py"}]} | 3,573 | 242 |