| problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info | num_tokens_prompt | num_tokens_diff |
|---|---|---|---|---|---|---|---|---|
| stringlengths 18-22 | stringclasses 1 value | stringclasses 1 value | stringlengths 13-58 | stringlengths 1.71k-18.9k | stringlengths 145-5.13k | stringlengths 465-23.6k | int64 556-4.1k | int64 47-1.02k |
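The table above lists the dataset's columns; each record pairs a prompt (issue statement plus partial code base) with a reference patch and verification data. A minimal sketch of loading the dataset and inspecting one record is shown below. It assumes the dataset is published on the Hugging Face Hub under the ID `rasdani/github-patches` with a `train` split; adjust those names if they differ.

```python
# Minimal sketch (assumed Hub ID and split): load the dataset and look at one record.
import json

from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")
row = ds[0]

print(row["problem_id"], row["in_source_id"])   # e.g. gh_patches_debug_23580, mitmproxy__mitmproxy-3516
print(row["prompt"][:300])                      # issue statement + partial code base + patch-format instructions
print(row["golden_diff"][:300])                 # reference patch in unified diff format

verification = json.loads(row["verification_info"])  # keys: golden_diff, issue, before_files
print(sorted(verification.keys()))
```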
problem_id: gh_patches_debug_23580 | source: rasdani/github-patches | task_type: git_diff | in_source_id: mitmproxy__mitmproxy-3516

prompt:

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Confusing confdir argument
My tests are messy cause I used mitmproxy and mitmdump, both of them in 4.0.1 and 4.0.3 and now I mixed all cases.
At some point, I think I had an error saying that --confdir was deprecated and had to use "--set confdir=" (I can't reproduce this case though with mitmproxy or mitmdump...)
I spent some time to try to make "--set confdir=" work in a weird bash script and arguments with quotes and maybe it failed due to me.
But I realized --confdir was still working eventually for mitmdump in 4.0.3.
Question to sum up:
Is it attended to have both "--confdir" and "--set confdir=" working at the same time for mitmproxy & mitmdump?
If yes, help (-h) should specify it clearly with something like: "--confdir PATH, --set confdir=PATH"
If not, one of them should be deleted.
</issue>
<code>
[start of mitmproxy/tools/cmdline.py]
1 import argparse
2
3 from mitmproxy.addons import core
4
5
6 def common_options(parser, opts):
7 parser.add_argument(
8 '--version',
9 action='store_true',
10 help="show version number and exit",
11 dest='version',
12 )
13 parser.add_argument(
14 '--options',
15 action='store_true',
16 help="Show all options and their default values",
17 )
18 parser.add_argument(
19 '--commands',
20 action='store_true',
21 help="Show all commands and their signatures",
22 )
23 parser.add_argument(
24 "--confdir",
25 type=str, dest="confdir", default=core.CONF_DIR,
26 metavar="PATH",
27 help="Path to the mitmproxy config directory"
28 )
29 parser.add_argument(
30 "--set",
31 type=str, dest="setoptions", default=[],
32 action="append",
33 metavar="option[=value]",
34 help="""
35 Set an option. When the value is omitted, booleans are set to true,
36 strings and integers are set to None (if permitted), and sequences
37 are emptied. Boolean values can be true, false or toggle.
38 """
39 )
40 parser.add_argument(
41 "-q", "--quiet",
42 action="store_true", dest="quiet",
43 help="Quiet."
44 )
45 parser.add_argument(
46 "-v", "--verbose",
47 action="store_const", dest="verbose", const='debug',
48 help="Increase log verbosity."
49 )
50
51 # Basic options
52 opts.make_parser(parser, "mode", short="m")
53 opts.make_parser(parser, "anticache")
54 opts.make_parser(parser, "showhost")
55 opts.make_parser(parser, "rfile", metavar="PATH", short="r")
56 opts.make_parser(parser, "scripts", metavar="SCRIPT", short="s")
57 opts.make_parser(parser, "stickycookie", metavar="FILTER")
58 opts.make_parser(parser, "stickyauth", metavar="FILTER")
59 opts.make_parser(parser, "save_stream_file", metavar="PATH", short="w")
60 opts.make_parser(parser, "anticomp")
61
62 # Proxy options
63 group = parser.add_argument_group("Proxy Options")
64 opts.make_parser(group, "listen_host", metavar="HOST")
65 opts.make_parser(group, "listen_port", metavar="PORT", short="p")
66 opts.make_parser(group, "server", short="n")
67 opts.make_parser(group, "ignore_hosts", metavar="HOST")
68 opts.make_parser(group, "tcp_hosts", metavar="HOST")
69 opts.make_parser(group, "upstream_auth", metavar="USER:PASS")
70 opts.make_parser(group, "proxyauth", metavar="SPEC")
71 opts.make_parser(group, "rawtcp")
72 opts.make_parser(group, "http2")
73
74 # Proxy SSL options
75 group = parser.add_argument_group("SSL")
76 opts.make_parser(group, "certs", metavar="SPEC")
77 opts.make_parser(group, "ssl_insecure", short="k")
78
79 # Client replay
80 group = parser.add_argument_group("Client Replay")
81 opts.make_parser(group, "client_replay", metavar="PATH", short="C")
82
83 # Server replay
84 group = parser.add_argument_group("Server Replay")
85 opts.make_parser(group, "server_replay", metavar="PATH", short="S")
86 opts.make_parser(group, "server_replay_kill_extra")
87 opts.make_parser(group, "server_replay_nopop")
88
89 # Replacements
90 group = parser.add_argument_group("Replacements")
91 opts.make_parser(group, "replacements", metavar="PATTERN", short="R")
92
93 # Set headers
94 group = parser.add_argument_group("Set Headers")
95 opts.make_parser(group, "setheaders", metavar="PATTERN", short="H")
96
97
98 def mitmproxy(opts):
99 parser = argparse.ArgumentParser(usage="%(prog)s [options]")
100 common_options(parser, opts)
101
102 opts.make_parser(parser, "console_layout")
103 opts.make_parser(parser, "console_layout_headers")
104 group = parser.add_argument_group(
105 "Filters",
106 "See help in mitmproxy for filter expression syntax."
107 )
108 opts.make_parser(group, "intercept", metavar="FILTER")
109 opts.make_parser(group, "view_filter", metavar="FILTER")
110 return parser
111
112
113 def mitmdump(opts):
114 parser = argparse.ArgumentParser(usage="%(prog)s [options] [filter]")
115
116 common_options(parser, opts)
117 opts.make_parser(parser, "flow_detail", metavar = "LEVEL")
118 parser.add_argument(
119 'filter_args',
120 nargs="...",
121 help="""
122 Filter expression, equivalent to setting both the view_filter
123 and save_stream_filter options.
124 """
125 )
126 return parser
127
128
129 def mitmweb(opts):
130 parser = argparse.ArgumentParser(usage="%(prog)s [options]")
131
132 group = parser.add_argument_group("Mitmweb")
133 opts.make_parser(group, "web_open_browser")
134 opts.make_parser(group, "web_port", metavar="PORT")
135 opts.make_parser(group, "web_iface", metavar="INTERFACE")
136
137 common_options(parser, opts)
138 group = parser.add_argument_group(
139 "Filters",
140 "See help in mitmproxy for filter expression syntax."
141 )
142 opts.make_parser(group, "intercept", metavar="FILTER")
143 return parser
144
[end of mitmproxy/tools/cmdline.py]
[start of mitmproxy/tools/_main.py]
1 """
2 This file contains python3.6+ syntax!
3 Feel free to import and use whatever new package you deem necessary.
4 """
5
6 import os
7 import sys
8 import asyncio
9 import argparse # noqa
10 import signal # noqa
11 import typing # noqa
12
13 from mitmproxy.tools import cmdline # noqa
14 from mitmproxy import exceptions, master # noqa
15 from mitmproxy import options # noqa
16 from mitmproxy import optmanager # noqa
17 from mitmproxy import proxy # noqa
18 from mitmproxy import log # noqa
19 from mitmproxy.utils import debug, arg_check # noqa
20
21 OPTIONS_FILE_NAME = "config.yaml"
22
23
24 def assert_utf8_env():
25 spec = ""
26 for i in ["LANG", "LC_CTYPE", "LC_ALL"]:
27 spec += os.environ.get(i, "").lower()
28 if "utf" not in spec:
29 print(
30 "Error: mitmproxy requires a UTF console environment.",
31 file=sys.stderr
32 )
33 print(
34 "Set your LANG environment variable to something like en_US.UTF-8",
35 file=sys.stderr
36 )
37 sys.exit(1)
38
39
40 def process_options(parser, opts, args):
41 if args.version:
42 print(debug.dump_system_info())
43 sys.exit(0)
44 if args.quiet or args.options or args.commands:
45 # also reduce log verbosity if --options or --commands is passed,
46 # we don't want log messages from regular startup then.
47 args.termlog_verbosity = 'error'
48 args.flow_detail = 0
49 if args.verbose:
50 args.termlog_verbosity = 'debug'
51 args.flow_detail = 2
52
53 adict = {}
54 for n in dir(args):
55 if n in opts:
56 adict[n] = getattr(args, n)
57 opts.merge(adict)
58
59 return proxy.config.ProxyConfig(opts)
60
61
62 def run(
63 master_cls: typing.Type[master.Master],
64 make_parser: typing.Callable[[options.Options], argparse.ArgumentParser],
65 arguments: typing.Sequence[str],
66 extra: typing.Callable[[typing.Any], dict] = None
67 ) -> master.Master: # pragma: no cover
68 """
69 extra: Extra argument processing callable which returns a dict of
70 options.
71 """
72 debug.register_info_dumpers()
73
74 opts = options.Options()
75 master = master_cls(opts)
76
77 parser = make_parser(opts)
78
79 # To make migration from 2.x to 3.0 bearable.
80 if "-R" in sys.argv and sys.argv[sys.argv.index("-R") + 1].startswith("http"):
81 print("-R is used for specifying replacements.\n"
82 "To use mitmproxy in reverse mode please use --mode reverse:SPEC instead")
83
84 try:
85 args = parser.parse_args(arguments)
86 except SystemExit:
87 arg_check.check()
88 sys.exit(1)
89 try:
90 opts.confdir = args.confdir
91 optmanager.load_paths(
92 opts,
93 os.path.join(opts.confdir, OPTIONS_FILE_NAME),
94 )
95 pconf = process_options(parser, opts, args)
96 server: typing.Any = None
97 if pconf.options.server:
98 try:
99 server = proxy.server.ProxyServer(pconf)
100 except exceptions.ServerException as v:
101 print(str(v), file=sys.stderr)
102 sys.exit(1)
103 else:
104 server = proxy.server.DummyServer(pconf)
105
106 master.server = server
107 if args.options:
108 print(optmanager.dump_defaults(opts))
109 sys.exit(0)
110 if args.commands:
111 master.commands.dump()
112 sys.exit(0)
113 opts.set(*args.setoptions, defer=True)
114 if extra:
115 opts.update(**extra(args))
116
117 loop = asyncio.get_event_loop()
118 for signame in ('SIGINT', 'SIGTERM'):
119 try:
120 loop.add_signal_handler(getattr(signal, signame), master.shutdown)
121 except NotImplementedError:
122 # Not supported on Windows
123 pass
124
125 # Make sure that we catch KeyboardInterrupts on Windows.
126 # https://stackoverflow.com/a/36925722/934719
127 if os.name == "nt":
128 async def wakeup():
129 while True:
130 await asyncio.sleep(0.2)
131 asyncio.ensure_future(wakeup())
132
133 master.run()
134 except exceptions.OptionsError as e:
135 print("%s: %s" % (sys.argv[0], e), file=sys.stderr)
136 sys.exit(1)
137 except (KeyboardInterrupt, RuntimeError):
138 pass
139 return master
140
141
142 def mitmproxy(args=None) -> typing.Optional[int]: # pragma: no cover
143 if os.name == "nt":
144 print("Error: mitmproxy's console interface is not supported on Windows. "
145 "You can run mitmdump or mitmweb instead.", file=sys.stderr)
146 return 1
147 assert_utf8_env()
148 from mitmproxy.tools import console
149 run(console.master.ConsoleMaster, cmdline.mitmproxy, args)
150 return None
151
152
153 def mitmdump(args=None) -> typing.Optional[int]: # pragma: no cover
154 from mitmproxy.tools import dump
155
156 def extra(args):
157 if args.filter_args:
158 v = " ".join(args.filter_args)
159 return dict(
160 save_stream_filter=v,
161 readfile_filter=v,
162 dumper_filter=v,
163 )
164 return {}
165
166 m = run(dump.DumpMaster, cmdline.mitmdump, args, extra)
167 if m and m.errorcheck.has_errored: # type: ignore
168 return 1
169 return None
170
171
172 def mitmweb(args=None) -> typing.Optional[int]: # pragma: no cover
173 from mitmproxy.tools import web
174 run(web.master.WebMaster, cmdline.mitmweb, args)
175 return None
176
[end of mitmproxy/tools/_main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

golden_diff:

diff --git a/mitmproxy/tools/_main.py b/mitmproxy/tools/_main.py
--- a/mitmproxy/tools/_main.py
+++ b/mitmproxy/tools/_main.py
@@ -87,7 +87,7 @@
arg_check.check()
sys.exit(1)
try:
- opts.confdir = args.confdir
+ opts.set(*args.setoptions, defer=True)
optmanager.load_paths(
opts,
os.path.join(opts.confdir, OPTIONS_FILE_NAME),
@@ -110,7 +110,6 @@
if args.commands:
master.commands.dump()
sys.exit(0)
- opts.set(*args.setoptions, defer=True)
if extra:
opts.update(**extra(args))
diff --git a/mitmproxy/tools/cmdline.py b/mitmproxy/tools/cmdline.py
--- a/mitmproxy/tools/cmdline.py
+++ b/mitmproxy/tools/cmdline.py
@@ -20,12 +20,6 @@
action='store_true',
help="Show all commands and their signatures",
)
- parser.add_argument(
- "--confdir",
- type=str, dest="confdir", default=core.CONF_DIR,
- metavar="PATH",
- help="Path to the mitmproxy config directory"
- )
parser.add_argument(
"--set",
type=str, dest="setoptions", default=[],
verification_info: JSON object containing the golden_diff, the issue text, and the pre-patch file contents (before_files) for this record | num_tokens_prompt: 3,889 | num_tokens_diff: 306
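Each prompt asks for a patch that can be applied with `git apply`. One hypothetical way to check a candidate patch against a record's `verification_info` is sketched below; the function name, the temporary-repository layout, and the use of `git apply --check` are illustrative assumptions, not the dataset's actual grading harness.

```python
# Hypothetical check (assumptions noted above): write the record's before_files
# into a temporary git repository, then test whether the candidate patch applies.
import json
import subprocess
import tempfile
from pathlib import Path


def patch_applies(verification_info: str, candidate_patch: str) -> bool:
    """Return True if candidate_patch applies cleanly to the pre-patch files."""
    info = json.loads(verification_info)
    with tempfile.TemporaryDirectory() as tmp:
        repo = Path(tmp)
        subprocess.run(["git", "init", "-q"], cwd=repo, check=True)
        for f in info["before_files"]:
            target = repo / f["path"]
            target.parent.mkdir(parents=True, exist_ok=True)
            target.write_text(f["content"])
        subprocess.run(["git", "add", "-A"], cwd=repo, check=True)
        (repo / "candidate.patch").write_text(candidate_patch)
        result = subprocess.run(
            ["git", "apply", "--check", "candidate.patch"],
            cwd=repo, capture_output=True,
        )
        return result.returncode == 0
```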
problem_id: gh_patches_debug_15965 | source: rasdani/github-patches | task_type: git_diff | in_source_id: mozilla__bugbug-121

prompt:

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add 'priority' feature
In bugbug/bug_features.py
</issue>
<code>
[start of bugbug/models/tracking.py]
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import xgboost
7 from sklearn.compose import ColumnTransformer
8 from sklearn.feature_extraction import DictVectorizer
9 from sklearn.pipeline import Pipeline
10
11 from bugbug import bug_features
12 from bugbug import bugzilla
13 from bugbug.model import Model
14
15
16 class TrackingModel(Model):
17 def __init__(self, lemmatization=False):
18 Model.__init__(self, lemmatization)
19
20 feature_extractors = [
21 bug_features.has_str(),
22 bug_features.has_regression_range(),
23 bug_features.severity(),
24 bug_features.keywords(),
25 bug_features.is_coverity_issue(),
26 bug_features.has_crash_signature(),
27 bug_features.has_url(),
28 bug_features.has_w3c_url(),
29 bug_features.has_github_url(),
30 bug_features.whiteboard(),
31 bug_features.patches(),
32 bug_features.landings(),
33 bug_features.title(),
34 ]
35
36 cleanup_functions = [
37 bug_features.cleanup_fileref,
38 bug_features.cleanup_url,
39 bug_features.cleanup_synonyms,
40 ]
41
42 self.extraction_pipeline = Pipeline([
43 ('bug_extractor', bug_features.BugExtractor(feature_extractors, cleanup_functions, rollback=True, rollback_when=self.rollback)),
44 ('union', ColumnTransformer([
45 ('data', DictVectorizer(), 'data'),
46
47 ('title', self.text_vectorizer(stop_words='english'), 'title'),
48
49 ('comments', self.text_vectorizer(stop_words='english'), 'comments'),
50 ])),
51 ])
52
53 self.clf = xgboost.XGBClassifier(n_jobs=16)
54 self.clf.set_params(predictor='cpu_predictor')
55
56 def rollback(self, change):
57 return change['field_name'].startswith('cf_tracking_firefox')
58
59 def get_labels(self):
60 classes = {}
61
62 for bug_data in bugzilla.get_bugs():
63 bug_id = int(bug_data['id'])
64
65 for entry in bug_data['history']:
66 for change in entry['changes']:
67 if change['field_name'].startswith('cf_tracking_firefox'):
68 if change['added'] in ['blocking', '+']:
69 classes[bug_id] = 1
70 elif change['added'] == '-':
71 classes[bug_id] = 0
72
73 if bug_id not in classes:
74 classes[bug_id] = 0
75
76 return classes
77
78 def get_feature_names(self):
79 return self.extraction_pipeline.named_steps['union'].get_feature_names()
80
[end of bugbug/models/tracking.py]
[start of bugbug/bug_features.py]
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import re
7 from datetime import datetime
8 from datetime import timezone
9
10 import pandas as pd
11 from libmozdata import versions
12 from sklearn.base import BaseEstimator
13 from sklearn.base import TransformerMixin
14
15 from bugbug import bug_snapshot
16 from bugbug import repository
17
18
19 def field(bug, field):
20 if field in bug and bug[field] != '---':
21 return bug[field]
22
23 return None
24
25
26 class has_str(object):
27 def __call__(self, bug):
28 return field(bug, 'cf_has_str')
29
30
31 class has_regression_range(object):
32 def __call__(self, bug):
33 return field(bug, 'cf_has_regression_range')
34
35
36 class has_crash_signature(object):
37 def __call__(self, bug):
38 return 'cf_crash_signature' in bug and bug['cf_crash_signature'] != ''
39
40
41 class keywords(object):
42 def __init__(self, to_ignore=set()):
43 self.to_ignore = to_ignore
44
45 def __call__(self, bug):
46 keywords = []
47 subkeywords = []
48 for keyword in bug['keywords']:
49 if keyword in self.to_ignore:
50 continue
51
52 keywords.append(keyword)
53
54 if keyword.startswith('sec-'):
55 subkeywords.append('sec-')
56 elif keyword.startswith('csectype-'):
57 subkeywords.append('csectype-')
58 return keywords + subkeywords
59
60
61 class severity(object):
62 def __call__(self, bug):
63 return field(bug, 'severity')
64
65
66 class is_coverity_issue(object):
67 def __call__(self, bug):
68 return re.search('[CID ?[0-9]+]', bug['summary']) is not None or re.search('[CID ?[0-9]+]', bug['whiteboard']) is not None
69
70
71 class has_url(object):
72 def __call__(self, bug):
73 return bug['url'] != ''
74
75
76 class has_w3c_url(object):
77 def __call__(self, bug):
78 return 'w3c' in bug['url']
79
80
81 class has_github_url(object):
82 def __call__(self, bug):
83 return 'github' in bug['url']
84
85
86 class whiteboard(object):
87 def __call__(self, bug):
88 ret = []
89
90 # TODO: Add any [XXX:YYY] that appears in the whiteboard as [XXX: only
91
92 for elem in ['memshrink', '[ux]']:
93 if elem in bug['whiteboard'].lower():
94 ret.append(elem)
95
96 return ret
97
98
99 class patches(object):
100 def __call__(self, bug):
101 return sum(1 for a in bug['attachments'] if a['is_patch'] or a['content_type'] in ['text/x-review-board-request', 'text/x-phabricator-request'])
102
103
104 class landings(object):
105 def __call__(self, bug):
106 return sum(1 for c in bug['comments'] if '://hg.mozilla.org/' in c['text'])
107
108
109 class title(object):
110 def __call__(self, bug):
111 ret = []
112
113 keywords = [
114 'fail',
115 ]
116 for keyword in keywords:
117 if keyword in bug['summary'].lower():
118 ret.append(keyword)
119
120 return ret
121
122
123 class product(object):
124 def __call__(self, bug):
125 return bug['product']
126
127
128 class component(object):
129 def __call__(self, bug):
130 return bug['component']
131
132
133 class is_mozillian(object):
134 def __call__(self, bug):
135 return any(bug['creator_detail']['email'].endswith(domain) for domain in ['@mozilla.com', '@mozilla.org'])
136
137
138 class delta_request_merge(object):
139 def __call__(self, bug):
140 for history in bug['history']:
141 for change in history['changes']:
142 if change['added'].startswith('approval-mozilla'):
143 uplift_request_datetime = datetime.strptime(history['when'], '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=timezone.utc)
144 timedelta = versions.getCloserRelease(uplift_request_datetime)[1] - uplift_request_datetime
145 return timedelta.days + timedelta.seconds / (24 * 60 * 60)
146
147 return None
148
149
150 class commit_added(object):
151 def __call__(self, bug):
152 return sum(commit['added'] for commit in bug['commits'])
153
154
155 class commit_deleted(object):
156 def __call__(self, bug):
157 return sum(commit['deleted'] for commit in bug['commits'])
158
159
160 class commit_types(object):
161 def __call__(self, bug):
162 return sum((commit['types'] for commit in bug['commits']), [])
163
164
165 def cleanup_url(text):
166 text = re.sub(r'http[s]?://(hg.mozilla|searchfox|dxr.mozilla)\S+', '__CODE_REFERENCE_URL__', text)
167 return re.sub(r'http\S+', '__URL__', text)
168
169
170 def cleanup_fileref(text):
171 return re.sub(r'\w+\.py\b|\w+\.json\b|\w+\.js\b|\w+\.jsm\b|\w+\.html\b|\w+\.css\b|\w+\.c\b|\w+\.cpp\b|\w+\.h\b', '__FILE_REFERENCE__', text)
172
173
174 def cleanup_responses(text):
175 return re.sub('>[^\n]+', ' ', text)
176
177
178 def cleanup_hex(text):
179 return re.sub(r'\b0[xX][0-9a-fA-F]+\b', '__HEX_NUMBER__', text)
180
181
182 def cleanup_dll(text):
183 return re.sub(r'\w+(\.dll|\.so|\.dylib)\b', '__DLL_NAME__', text)
184
185
186 def cleanup_synonyms(text):
187 synonyms = [
188 ('safemode', ['safemode', 'safe mode']),
189 ('str', ['str', 'steps to reproduce', 'repro steps']),
190 ('uaf', ['uaf', 'use after free', 'use-after-free']),
191 ('asan', ['asan', 'address sanitizer', 'addresssanitizer']),
192 ('permafailure', ['permafailure', 'permafailing', 'permafail', 'perma failure', 'perma failing', 'perma fail', 'perma-failure', 'perma-failing', 'perma-fail']),
193 ('spec', ['spec', 'specification']),
194 ]
195
196 for synonym_group, synonym_list in synonyms:
197 text = re.sub('|'.join(fr'\b{synonym}\b' for synonym in synonym_list), synonym_group, text, flags=re.IGNORECASE)
198
199 return text
200
201
202 def cleanup_crash(text):
203 return re.sub(r'bp-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{6}[0-9]{6}\b', '__CRASH_STATS_LINK__', text)
204
205
206 class BugExtractor(BaseEstimator, TransformerMixin):
207 def __init__(self, feature_extractors, cleanup_functions, rollback=False, rollback_when=None, commit_data=False):
208 self.feature_extractors = feature_extractors
209 self.cleanup_functions = cleanup_functions
210 self.rollback = rollback
211 self.rollback_when = rollback_when
212 self.commit_map = repository.get_commit_map() if commit_data else None
213
214 def fit(self, x, y=None):
215 return self
216
217 def transform(self, bugs):
218 results = []
219
220 for bug in bugs:
221 bug_id = bug['id']
222
223 if self.rollback:
224 bug = bug_snapshot.rollback(bug, self.rollback_when)
225
226 data = {}
227
228 if self.commit_map is not None:
229 if bug_id in self.commit_map:
230 bug['commits'] = self.commit_map[bug_id]
231 else:
232 bug['commits'] = []
233
234 for f in self.feature_extractors:
235 res = f(bug)
236
237 if res is None:
238 continue
239
240 if isinstance(res, list):
241 for item in res:
242 data[f.__class__.__name__ + '-' + item] = 'True'
243 continue
244
245 if isinstance(res, bool):
246 res = str(res)
247
248 data[f.__class__.__name__] = res
249
250 # TODO: Try simply using all possible fields instead of extracting features manually.
251
252 for cleanup_function in self.cleanup_functions:
253 bug['summary'] = cleanup_function(bug['summary'])
254 for c in bug['comments']:
255 c['text'] = cleanup_function(c['text'])
256
257 result = {
258 'data': data,
259 'title': bug['summary'],
260 'first_comment': bug['comments'][0]['text'],
261 'comments': ' '.join([c['text'] for c in bug['comments']]),
262 }
263
264 results.append(result)
265
266 return pd.DataFrame(results)
267
[end of bugbug/bug_features.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

golden_diff:

diff --git a/bugbug/bug_features.py b/bugbug/bug_features.py
--- a/bugbug/bug_features.py
+++ b/bugbug/bug_features.py
@@ -162,6 +162,11 @@
return sum((commit['types'] for commit in bug['commits']), [])
+class priority(object):
+ def __call__(self, bug):
+ return bug['priority']
+
+
def cleanup_url(text):
text = re.sub(r'http[s]?://(hg.mozilla|searchfox|dxr.mozilla)\S+', '__CODE_REFERENCE_URL__', text)
return re.sub(r'http\S+', '__URL__', text)
diff --git a/bugbug/models/tracking.py b/bugbug/models/tracking.py
--- a/bugbug/models/tracking.py
+++ b/bugbug/models/tracking.py
@@ -31,6 +31,7 @@
bug_features.patches(),
bug_features.landings(),
bug_features.title(),
+ bug_features.priority(),
]
cleanup_functions = [
verification_info: JSON object containing the golden_diff, the issue text, and the pre-patch file contents (before_files) for this record | num_tokens_prompt: 3,925 | num_tokens_diff: 228
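Model responses are expected to wrap the diff in `<patch>...</patch>` tags, as in the example patch shown in each prompt. A small helper for pulling the diff out of such a response might look like the following; the function name is an illustrative assumption.

```python
# Assumed helper: extract the unified diff from a <patch>...</patch>-wrapped response.
import re


def extract_patch(response: str) -> str | None:
    match = re.search(r"<patch>\s*(.*?)\s*</patch>", response, flags=re.DOTALL)
    return match.group(1) if match else None
```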
problem_id: gh_patches_debug_19773 | source: rasdani/github-patches | task_type: git_diff | in_source_id: akvo__akvo-rsr-4044

prompt:

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ensure UP app only lists projects that user has access
</issue>
<code>
[start of akvo/rsr/views/account.py]
1 # -*- coding: utf-8 -*-
2
3 """Akvo RSR is covered by the GNU Affero General Public License.
4
5 See more details in the license.txt file located at the root folder of the
6 Akvo RSR module. For additional details on the GNU license please
7 see < http://www.gnu.org/licenses/agpl.html >.
8 """
9
10 import re
11 import json
12
13 from lxml import etree
14 from tastypie.models import ApiKey
15
16 from akvo.rsr.forms import RegisterForm, InvitedUserForm, PasswordResetForm
17 from akvo.rsr.models import Employment
18 from akvo.utils import rsr_send_mail
19
20 from django.conf import settings
21 from django.contrib.auth import login, logout, authenticate, get_user_model
22 from django.contrib.auth.forms import AuthenticationForm
23 from django.core.exceptions import ObjectDoesNotExist, ValidationError
24 from django.core.signing import TimestampSigner, BadSignature
25 from django.http import (HttpResponse, HttpResponseRedirect,
26 HttpResponseForbidden)
27 from django.shortcuts import redirect, render
28
29 from registration.models import RegistrationProfile
30
31 from django.views.decorators.csrf import csrf_exempt
32 from django.views.decorators.http import require_POST
33
34
35 def register(request):
36 """Register form."""
37 if request.method == 'POST':
38 form = RegisterForm(data=request.POST, files=request.FILES)
39 if form.is_valid():
40 # Honeypot field filled in? If so don't register and redirect to home page
41 if request.POST.get('hp_title'):
42 return redirect('index')
43 user = form.save(request)
44 return render(
45 request,
46 'registration/register_complete.html',
47 {'new_user': user},
48 )
49 else:
50 form = RegisterForm()
51 return render(
52 request,
53 'registration/register.html',
54 {'form': form, 'password_length': settings.PASSWORD_MINIMUM_LENGTH}
55 )
56
57
58 def activate(request, activation_key, extra_context=None):
59 """Activate resouce.
60
61 Activate a User's account, if their key is valid and hasn't expired.
62 Any values passed in the keyword argument "extra_context"
63 (which must be a dictionary) will be added to the context.
64 Any values in "extra_context" which are callable will be called prior to
65 being added to the context.
66 """
67 sha = re.compile('^[a-f0-9]{40}$')
68 activation_key = activation_key.lower()
69
70 if sha.search(activation_key):
71 try:
72 registration_profile = RegistrationProfile.objects.get(
73 activation_key=activation_key)
74 except RegistrationProfile.DoesNotExist:
75 user = False
76 else:
77 if not registration_profile.activation_key_expired():
78 registration_profile.activation_key = RegistrationProfile.ACTIVATED
79 registration_profile.save()
80 user = registration_profile.user
81 user.is_active = True
82 user.save()
83
84 # Log in user without password, using custom backend
85 user = authenticate(username=user.username, no_password=True)
86 login(request, user)
87 if extra_context is None:
88 extra_context = {}
89 context = dict()
90 for key, value in extra_context.items():
91 context[key] = callable(value) and value() or value
92 return render(
93 request,
94 'registration/activate.html',
95 context
96 )
97
98
99 def invite_activate(request, inviting_pk, user_pk, employment_pk, token_date, token):
100 """
101 Activate a user that has been invited to use RSR.
102
103 :param request: the request
104 :param inviting_pk: the invitee user's primary key
105 :param user_pk: the invited user's primary key
106 :param employment_pk: the employment's primary key
107 :param token_date: the first part of the token
108 :param token: the second part of the token
109 """
110
111 def approve_employment(invitee, invited, empl):
112 """
113 Approves the employment and sends a mail to the user that has invited the new user.
114
115 :param invitee: the invitee user's instance
116 :param invited: the invited user's instance
117 :param empl: the employment's instance
118 """
119 empl.approve(invitee)
120
121 if invitee:
122 # Send notification email to inviting user
123 rsr_send_mail(
124 [invitee.email],
125 subject='registration/inviting_user_notification_subject.txt',
126 message='registration/inviting_user_notification_message.txt',
127 html_message='registration/inviting_user_notification_message.html',
128 subject_context={
129 'user': invited,
130 },
131 msg_context={
132 'invited_user': invited,
133 'inviting_user': invitee,
134 'organisation': empl.organisation,
135 }
136 )
137
138 def login_and_redirect(req, invited):
139 """
140 Log the invited user in and redirect to the My projects page in MyRSR.
141
142 :param req: the request
143 :param invited: the invited user's instance
144 """
145 invited = authenticate(username=invited.username, no_password=True)
146 login(request, invited)
147 return redirect('my_projects')
148
149 bad_link, user, inviting_user, employment = False, None, None, None
150
151 try:
152 user = get_user_model().objects.get(pk=user_pk)
153 inviting_user = get_user_model().objects.get(pk=inviting_pk)
154 employment = Employment.objects.get(pk=employment_pk) if int(employment_pk) != 0 else None
155 except ObjectDoesNotExist:
156 bad_link = True
157
158 try:
159 TimestampSigner().unsign(':'.join([user.email, token_date, token]))
160 except BadSignature:
161 bad_link = True
162
163 if user and user.is_active:
164 if employment and employment.is_approved:
165 # User is active and employment is approved, so nothing to do here
166 return login_and_redirect(request, user)
167 elif employment and not bad_link:
168 # Employment is not yet approved, and link is ok.
169 # Approve employment and log user in.
170 approve_employment(inviting_user, user, employment)
171 return login_and_redirect(request, user)
172 elif employment is None and not bad_link and request.GET.get('project_invite') is not None:
173 return login_and_redirect(request, user)
174
175 if request.method == 'POST':
176 form = InvitedUserForm(user=user, data=request.POST)
177 if form.is_valid():
178 # Approve employment and save new user details
179 form.save(request)
180 if employment is not None:
181 approve_employment(inviting_user, user, employment)
182 return login_and_redirect(request, user)
183 else:
184 form = InvitedUserForm(user=user)
185
186 context = {
187 'form': form,
188 'bad_link': bad_link,
189 'password_length': settings.PASSWORD_MINIMUM_LENGTH
190 }
191 return render(request, 'registration/invite_activate.html', context)
192
193
194 def sign_in(request):
195 """Sign in.
196
197 POST have two variants with username & email:
198 - username > normal sign in
199 - email > password reset workflow
200 """
201 form = AuthenticationForm()
202 reset_form = PasswordResetForm()
203 if request.method == "POST" and 'username' in request.POST:
204 form = AuthenticationForm(data=request.POST)
205 if form.is_valid():
206 login(request, form.get_user())
207 next_page = request.GET.get('next')
208 return HttpResponseRedirect(next_page) if next_page else redirect('my_projects')
209 # Password reset on sign in page
210 elif request.method == "POST" and 'email' in request.POST:
211 reset_form = PasswordResetForm(data=request.POST)
212 if reset_form.is_valid():
213 reset_form.save(domain_override=settings.RSR_DOMAIN)
214 return HttpResponse()
215 return render(request, 'sign_in.html', {'form': form, 'reset_form': reset_form})
216
217
218 def sign_out(request):
219 """Log out resouce."""
220 logout(request)
221 return redirect('index')
222
223
224 def api_key_xml_response(user, orgs):
225 """Build the XML response.
226
227 This is used by the Up app - so make sure they match on change.
228 """
229 xml_root = etree.Element("credentials")
230
231 # User
232 user_id_element = etree.SubElement(xml_root, "user_id")
233 user_id_element.text = str(user.id)
234 user_username_element = etree.SubElement(xml_root, "username")
235 user_username_element.text = user.username
236
237 # Organisations
238 for org in orgs:
239 org_id_element = etree.SubElement(xml_root, "org_id")
240 org_id_element.text = str(org.id)
241
242 # API key
243 api_key_element = etree.SubElement(xml_root, "api_key")
244 api_key_element.text = ApiKey.objects.get_or_create(user=user)[0].key
245
246 # Published and editable projects
247 projects = orgs.all_projects().published()
248 pub_projs_element = etree.SubElement(xml_root, "published_projects")
249 edit_projs_element = etree.SubElement(xml_root, "allow_edit_projects")
250 for project in projects:
251 project_id_element = etree.SubElement(pub_projs_element, "id")
252 project_id_element.text = str(project.id)
253 if user.has_perm('rsr.change_project', project):
254 project_id_element = etree.SubElement(edit_projs_element, "id")
255 project_id_element.text = str(project.id)
256
257 return etree.tostring(etree.ElementTree(xml_root))
258
259
260 def api_key_json_response(user, orgs):
261 """
262 Build the JSON response. This is used by the Up app - so make sure they match on change.
263 """
264 response_data = dict()
265
266 # User
267 response_data["user_id"] = user.id
268 response_data["username"] = user.username
269
270 # Organisations
271 response_data["organisations"] = [org.id for org in orgs]
272
273 # API key
274 response_data["api_key"] = ApiKey.objects.get_or_create(user=user)[0].key
275
276 # Published projects
277 projects = orgs.all_projects().published()
278 response_data["published_projects"] = [p.id for p in projects]
279
280 # Editable projects
281 perm = 'rsr.change_project'
282 perm_filter = user.get_permission_filter(perm, '')
283 response_data["allow_edit_projects"] = list(projects.filter(perm_filter).values_list('id', flat=True))
284
285 return json.dumps(response_data)
286
287
288 @require_POST
289 @csrf_exempt
290 def api_key(request):
291 """On successful user credentials returns an auth token for API usage.
292
293 Since RSR changed in v3 to allow users without an organisation we need to
294 introduce a way to make old Up apps work as before but new ones support
295 users without any connected organisations.
296 """
297 request_format = request.GET.get('format', 'xml')
298 username = request.POST.get('username', False)
299 password = request.POST.get('password', False)
300 handles_unemployed = bool(request.POST.get("handles_unemployed", False))
301
302 if username and password:
303 try:
304 user = authenticate(username=username, password=password)
305 except ValidationError:
306 user = None
307 if user is not None and user.is_active:
308 orgs = user.approved_organisations()
309 if orgs or handles_unemployed:
310 if request_format == 'xml':
311 return HttpResponse(api_key_xml_response(user, orgs),
312 content_type="text/xml")
313 elif request_format == 'json':
314 return HttpResponse(api_key_json_response(user, orgs),
315 content_type="application/json")
316 return HttpResponseForbidden()
317
[end of akvo/rsr/views/account.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rsr/views/account.py b/akvo/rsr/views/account.py
--- a/akvo/rsr/views/account.py
+++ b/akvo/rsr/views/account.py
@@ -244,7 +244,7 @@
api_key_element.text = ApiKey.objects.get_or_create(user=user)[0].key
# Published and editable projects
- projects = orgs.all_projects().published()
+ projects = user.my_projects.published()
pub_projs_element = etree.SubElement(xml_root, "published_projects")
edit_projs_element = etree.SubElement(xml_root, "allow_edit_projects")
for project in projects:
@@ -274,7 +274,7 @@
response_data["api_key"] = ApiKey.objects.get_or_create(user=user)[0].key
# Published projects
- projects = orgs.all_projects().published()
+ projects = user.my_projects().published()
response_data["published_projects"] = [p.id for p in projects]
# Editable projects
| {"golden_diff": "diff --git a/akvo/rsr/views/account.py b/akvo/rsr/views/account.py\n--- a/akvo/rsr/views/account.py\n+++ b/akvo/rsr/views/account.py\n@@ -244,7 +244,7 @@\n api_key_element.text = ApiKey.objects.get_or_create(user=user)[0].key\n \n # Published and editable projects\n- projects = orgs.all_projects().published()\n+ projects = user.my_projects.published()\n pub_projs_element = etree.SubElement(xml_root, \"published_projects\")\n edit_projs_element = etree.SubElement(xml_root, \"allow_edit_projects\")\n for project in projects:\n@@ -274,7 +274,7 @@\n response_data[\"api_key\"] = ApiKey.objects.get_or_create(user=user)[0].key\n \n # Published projects\n- projects = orgs.all_projects().published()\n+ projects = user.my_projects().published()\n response_data[\"published_projects\"] = [p.id for p in projects]\n \n # Editable projects\n", "issue": "Ensure UP app only lists projects that user has access\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the\nAkvo RSR module. For additional details on the GNU license please\nsee < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nimport re\nimport json\n\nfrom lxml import etree\nfrom tastypie.models import ApiKey\n\nfrom akvo.rsr.forms import RegisterForm, InvitedUserForm, PasswordResetForm\nfrom akvo.rsr.models import Employment\nfrom akvo.utils import rsr_send_mail\n\nfrom django.conf import settings\nfrom django.contrib.auth import login, logout, authenticate, get_user_model\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.core.exceptions import ObjectDoesNotExist, ValidationError\nfrom django.core.signing import TimestampSigner, BadSignature\nfrom django.http import (HttpResponse, HttpResponseRedirect,\n HttpResponseForbidden)\nfrom django.shortcuts import redirect, render\n\nfrom registration.models import RegistrationProfile\n\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_POST\n\n\ndef register(request):\n \"\"\"Register form.\"\"\"\n if request.method == 'POST':\n form = RegisterForm(data=request.POST, files=request.FILES)\n if form.is_valid():\n # Honeypot field filled in? 
If so don't register and redirect to home page\n if request.POST.get('hp_title'):\n return redirect('index')\n user = form.save(request)\n return render(\n request,\n 'registration/register_complete.html',\n {'new_user': user},\n )\n else:\n form = RegisterForm()\n return render(\n request,\n 'registration/register.html',\n {'form': form, 'password_length': settings.PASSWORD_MINIMUM_LENGTH}\n )\n\n\ndef activate(request, activation_key, extra_context=None):\n \"\"\"Activate resouce.\n\n Activate a User's account, if their key is valid and hasn't expired.\n Any values passed in the keyword argument \"extra_context\"\n (which must be a dictionary) will be added to the context.\n Any values in \"extra_context\" which are callable will be called prior to\n being added to the context.\n \"\"\"\n sha = re.compile('^[a-f0-9]{40}$')\n activation_key = activation_key.lower()\n\n if sha.search(activation_key):\n try:\n registration_profile = RegistrationProfile.objects.get(\n activation_key=activation_key)\n except RegistrationProfile.DoesNotExist:\n user = False\n else:\n if not registration_profile.activation_key_expired():\n registration_profile.activation_key = RegistrationProfile.ACTIVATED\n registration_profile.save()\n user = registration_profile.user\n user.is_active = True\n user.save()\n\n # Log in user without password, using custom backend\n user = authenticate(username=user.username, no_password=True)\n login(request, user)\n if extra_context is None:\n extra_context = {}\n context = dict()\n for key, value in extra_context.items():\n context[key] = callable(value) and value() or value\n return render(\n request,\n 'registration/activate.html',\n context\n )\n\n\ndef invite_activate(request, inviting_pk, user_pk, employment_pk, token_date, token):\n \"\"\"\n Activate a user that has been invited to use RSR.\n\n :param request: the request\n :param inviting_pk: the invitee user's primary key\n :param user_pk: the invited user's primary key\n :param employment_pk: the employment's primary key\n :param token_date: the first part of the token\n :param token: the second part of the token\n \"\"\"\n\n def approve_employment(invitee, invited, empl):\n \"\"\"\n Approves the employment and sends a mail to the user that has invited the new user.\n\n :param invitee: the invitee user's instance\n :param invited: the invited user's instance\n :param empl: the employment's instance\n \"\"\"\n empl.approve(invitee)\n\n if invitee:\n # Send notification email to inviting user\n rsr_send_mail(\n [invitee.email],\n subject='registration/inviting_user_notification_subject.txt',\n message='registration/inviting_user_notification_message.txt',\n html_message='registration/inviting_user_notification_message.html',\n subject_context={\n 'user': invited,\n },\n msg_context={\n 'invited_user': invited,\n 'inviting_user': invitee,\n 'organisation': empl.organisation,\n }\n )\n\n def login_and_redirect(req, invited):\n \"\"\"\n Log the invited user in and redirect to the My projects page in MyRSR.\n\n :param req: the request\n :param invited: the invited user's instance\n \"\"\"\n invited = authenticate(username=invited.username, no_password=True)\n login(request, invited)\n return redirect('my_projects')\n\n bad_link, user, inviting_user, employment = False, None, None, None\n\n try:\n user = get_user_model().objects.get(pk=user_pk)\n inviting_user = get_user_model().objects.get(pk=inviting_pk)\n employment = Employment.objects.get(pk=employment_pk) if int(employment_pk) != 0 else None\n except 
ObjectDoesNotExist:\n bad_link = True\n\n try:\n TimestampSigner().unsign(':'.join([user.email, token_date, token]))\n except BadSignature:\n bad_link = True\n\n if user and user.is_active:\n if employment and employment.is_approved:\n # User is active and employment is approved, so nothing to do here\n return login_and_redirect(request, user)\n elif employment and not bad_link:\n # Employment is not yet approved, and link is ok.\n # Approve employment and log user in.\n approve_employment(inviting_user, user, employment)\n return login_and_redirect(request, user)\n elif employment is None and not bad_link and request.GET.get('project_invite') is not None:\n return login_and_redirect(request, user)\n\n if request.method == 'POST':\n form = InvitedUserForm(user=user, data=request.POST)\n if form.is_valid():\n # Approve employment and save new user details\n form.save(request)\n if employment is not None:\n approve_employment(inviting_user, user, employment)\n return login_and_redirect(request, user)\n else:\n form = InvitedUserForm(user=user)\n\n context = {\n 'form': form,\n 'bad_link': bad_link,\n 'password_length': settings.PASSWORD_MINIMUM_LENGTH\n }\n return render(request, 'registration/invite_activate.html', context)\n\n\ndef sign_in(request):\n \"\"\"Sign in.\n\n POST have two variants with username & email:\n - username > normal sign in\n - email > password reset workflow\n \"\"\"\n form = AuthenticationForm()\n reset_form = PasswordResetForm()\n if request.method == \"POST\" and 'username' in request.POST:\n form = AuthenticationForm(data=request.POST)\n if form.is_valid():\n login(request, form.get_user())\n next_page = request.GET.get('next')\n return HttpResponseRedirect(next_page) if next_page else redirect('my_projects')\n # Password reset on sign in page\n elif request.method == \"POST\" and 'email' in request.POST:\n reset_form = PasswordResetForm(data=request.POST)\n if reset_form.is_valid():\n reset_form.save(domain_override=settings.RSR_DOMAIN)\n return HttpResponse()\n return render(request, 'sign_in.html', {'form': form, 'reset_form': reset_form})\n\n\ndef sign_out(request):\n \"\"\"Log out resouce.\"\"\"\n logout(request)\n return redirect('index')\n\n\ndef api_key_xml_response(user, orgs):\n \"\"\"Build the XML response.\n\n This is used by the Up app - so make sure they match on change.\n \"\"\"\n xml_root = etree.Element(\"credentials\")\n\n # User\n user_id_element = etree.SubElement(xml_root, \"user_id\")\n user_id_element.text = str(user.id)\n user_username_element = etree.SubElement(xml_root, \"username\")\n user_username_element.text = user.username\n\n # Organisations\n for org in orgs:\n org_id_element = etree.SubElement(xml_root, \"org_id\")\n org_id_element.text = str(org.id)\n\n # API key\n api_key_element = etree.SubElement(xml_root, \"api_key\")\n api_key_element.text = ApiKey.objects.get_or_create(user=user)[0].key\n\n # Published and editable projects\n projects = orgs.all_projects().published()\n pub_projs_element = etree.SubElement(xml_root, \"published_projects\")\n edit_projs_element = etree.SubElement(xml_root, \"allow_edit_projects\")\n for project in projects:\n project_id_element = etree.SubElement(pub_projs_element, \"id\")\n project_id_element.text = str(project.id)\n if user.has_perm('rsr.change_project', project):\n project_id_element = etree.SubElement(edit_projs_element, \"id\")\n project_id_element.text = str(project.id)\n\n return etree.tostring(etree.ElementTree(xml_root))\n\n\ndef api_key_json_response(user, orgs):\n \"\"\"\n Build 
the JSON response. This is used by the Up app - so make sure they match on change.\n \"\"\"\n response_data = dict()\n\n # User\n response_data[\"user_id\"] = user.id\n response_data[\"username\"] = user.username\n\n # Organisations\n response_data[\"organisations\"] = [org.id for org in orgs]\n\n # API key\n response_data[\"api_key\"] = ApiKey.objects.get_or_create(user=user)[0].key\n\n # Published projects\n projects = orgs.all_projects().published()\n response_data[\"published_projects\"] = [p.id for p in projects]\n\n # Editable projects\n perm = 'rsr.change_project'\n perm_filter = user.get_permission_filter(perm, '')\n response_data[\"allow_edit_projects\"] = list(projects.filter(perm_filter).values_list('id', flat=True))\n\n return json.dumps(response_data)\n\n\n@require_POST\n@csrf_exempt\ndef api_key(request):\n \"\"\"On successful user credentials returns an auth token for API usage.\n\n Since RSR changed in v3 to allow users without an organisation we need to\n introduce a way to make old Up apps work as before but new ones support\n users without any connected organisations.\n \"\"\"\n request_format = request.GET.get('format', 'xml')\n username = request.POST.get('username', False)\n password = request.POST.get('password', False)\n handles_unemployed = bool(request.POST.get(\"handles_unemployed\", False))\n\n if username and password:\n try:\n user = authenticate(username=username, password=password)\n except ValidationError:\n user = None\n if user is not None and user.is_active:\n orgs = user.approved_organisations()\n if orgs or handles_unemployed:\n if request_format == 'xml':\n return HttpResponse(api_key_xml_response(user, orgs),\n content_type=\"text/xml\")\n elif request_format == 'json':\n return HttpResponse(api_key_json_response(user, orgs),\n content_type=\"application/json\")\n return HttpResponseForbidden()\n", "path": "akvo/rsr/views/account.py"}]} | 3,814 | 225 |
gh_patches_debug_12100 | rasdani/github-patches | git_diff | piskvorky__gensim-681 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
KeyError: Popping a non-existing value from `_keywords`
OS X 10.11.1
Python 2.7.10
gensim 0.12.4
To reproduce this in a Python shell:
```
>>> import gensim.summarization
>>> t = "Victor S. Sage Compare Sage 50c Editions Find accounting software that's right for your business Every product comes with anytime, anywhere online access; automatic updates; access to unlimited support; access to built-in credit card processing and payroll; and advanced reporting. Three solutions for your business 1 user From $249/year Buy now Free Trial 1-5 users From $299/year Buy now Free Trial 3-40 users From $1,199/year Buy now Free Trial Essential Accounting Accounts payable, accounts receivable, cash management check check check open check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check Advanced Accounting Automated tasks, audit trail, budgeting, change order processing check check open check check check check check check check check check check check check check check check check check check check check check check check check check check check check In-depth Accounting Fast processing, industry-specific features, workflow management check open check check check check check check check check check check check Disclaimers open * This product is backed by a no-risk guarantee for first-time Sage 50 customers. If, within 60 days of purchase, you are not convinced that Sage 50 is the best accounting program for your business, we will refund your money (less and rebate you have received for this purchase). Dated proof of purchase and return of product is required. For details, call 877-481-0341."
>>> import gensim.summarization
>>> keywords = gensim.summarization.keywords(t, pos_filter=[], ratio=0.2, lemmatize=True, scores=True)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python2.7/site-packages/gensim/summarization/keywords.py", line 229, in keywords
combined_keywords = _get_combined_keywords(keywords, text.split())
File "/usr/local/lib/python2.7/site-packages/gensim/summarization/keywords.py", line 171, in _get_combined_keywords
_keywords.pop(keyword)
KeyError: u'check'
```
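
For context, a minimal sketch of the failure mode suggested by the traceback (an illustration only, not code taken from gensim): when two adjacent tokens in the text are the same ranked keyword, `_get_combined_keywords` collects that keyword twice in `combined_word` and then pops it from `_keywords` twice.

```
# Illustration: mirrors the state inside _get_combined_keywords for "... check check ..."
_keywords = {"check": 0.12}          # keyword -> score
combined_word = ["check", "check"]   # the same keyword collected from adjacent tokens

for keyword in combined_word:
    _keywords.pop(keyword)           # second iteration raises KeyError: 'check'
```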
</issue>
<code>
[start of gensim/summarization/keywords.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
5
6 from gensim.summarization.pagerank_weighted import pagerank_weighted as _pagerank
7 from gensim.summarization.textcleaner import clean_text_by_word as _clean_text_by_word
8 from gensim.summarization.textcleaner import tokenize_by_word as _tokenize_by_word
9 from gensim.summarization.commons import build_graph as _build_graph
10 from gensim.summarization.commons import remove_unreachable_nodes as _remove_unreachable_nodes
11 from gensim.utils import to_unicode
12 from itertools import combinations as _combinations
13 from six.moves.queue import Queue as _Queue
14 from six.moves import xrange
15 from six import iteritems
16
17
18 WINDOW_SIZE = 2
19
20 """
21 Check tags in http://www.clips.ua.ac.be/pages/mbsp-tags and use only first two letters
22 Example: filter for nouns and adjectives:
23 INCLUDING_FILTER = ['NN', 'JJ']
24 """
25 INCLUDING_FILTER = ['NN', 'JJ']
26 EXCLUDING_FILTER = []
27
28
29 def _get_pos_filters():
30 return frozenset(INCLUDING_FILTER), frozenset(EXCLUDING_FILTER)
31
32
33 def _get_words_for_graph(tokens, pos_filter):
34 if pos_filter is None:
35 include_filters, exclude_filters = _get_pos_filters()
36 else:
37 include_filters = set(pos_filter)
38 exclude_filters = frozenset([])
39 if include_filters and exclude_filters:
40 raise ValueError("Can't use both include and exclude filters, should use only one")
41
42 result = []
43 for word, unit in iteritems(tokens):
44 if exclude_filters and unit.tag in exclude_filters:
45 continue
46 if (include_filters and unit.tag in include_filters) or not include_filters or not unit.tag:
47 result.append(unit.token)
48 return result
49
50
51 def _get_first_window(split_text):
52 return split_text[:WINDOW_SIZE]
53
54
55 def _set_graph_edge(graph, tokens, word_a, word_b):
56 if word_a in tokens and word_b in tokens:
57 lemma_a = tokens[word_a].token
58 lemma_b = tokens[word_b].token
59 edge = (lemma_a, lemma_b)
60
61 if graph.has_node(lemma_a) and graph.has_node(lemma_b) and not graph.has_edge(edge):
62 graph.add_edge(edge)
63
64
65 def _process_first_window(graph, tokens, split_text):
66 first_window = _get_first_window(split_text)
67 for word_a, word_b in _combinations(first_window, 2):
68 _set_graph_edge(graph, tokens, word_a, word_b)
69
70
71 def _init_queue(split_text):
72 queue = _Queue()
73 first_window = _get_first_window(split_text)
74 for word in first_window[1:]:
75 queue.put(word)
76 return queue
77
78
79 def _process_word(graph, tokens, queue, word):
80 for word_to_compare in _queue_iterator(queue):
81 _set_graph_edge(graph, tokens, word, word_to_compare)
82
83
84 def _update_queue(queue, word):
85 queue.get()
86 queue.put(word)
87 assert queue.qsize() == (WINDOW_SIZE - 1)
88
89
90 def _process_text(graph, tokens, split_text):
91 queue = _init_queue(split_text)
92 for i in xrange(WINDOW_SIZE, len(split_text)):
93 word = split_text[i]
94 _process_word(graph, tokens, queue, word)
95 _update_queue(queue, word)
96
97
98 def _queue_iterator(queue):
99 iterations = queue.qsize()
100 for i in xrange(iterations):
101 var = queue.get()
102 yield var
103 queue.put(var)
104
105
106 def _set_graph_edges(graph, tokens, split_text):
107 _process_first_window(graph, tokens, split_text)
108 _process_text(graph, tokens, split_text)
109
110
111 def _extract_tokens(lemmas, scores, ratio, words):
112 lemmas.sort(key=lambda s: scores[s], reverse=True)
113
114 # If no "words" option is selected, the number of sentences is
115 # reduced by the provided ratio, else, the ratio is ignored.
116 length = len(lemmas) * ratio if words is None else words
117 return [(scores[lemmas[i]], lemmas[i],) for i in range(int(length))]
118
119
120 def _lemmas_to_words(tokens):
121 lemma_to_word = {}
122 for word, unit in iteritems(tokens):
123 lemma = unit.token
124 if lemma in lemma_to_word:
125 lemma_to_word[lemma].append(word)
126 else:
127 lemma_to_word[lemma] = [word]
128 return lemma_to_word
129
130
131 def _get_keywords_with_score(extracted_lemmas, lemma_to_word):
132 """
133 :param extracted_lemmas:list of tuples
134 :param lemma_to_word: dict of {lemma:list of words}
135 :return: dict of {keyword:score}
136 """
137 keywords = {}
138 for score, lemma in extracted_lemmas:
139 keyword_list = lemma_to_word[lemma]
140 for keyword in keyword_list:
141 keywords[keyword] = score
142 return keywords
143
144
145 def _strip_word(word):
146 stripped_word_list = list(_tokenize_by_word(word))
147 return stripped_word_list[0] if stripped_word_list else ""
148
149
150 def _get_combined_keywords(_keywords, split_text):
151 """
152 :param keywords:dict of keywords:scores
153 :param split_text: list of strings
154 :return: combined_keywords:list
155 """
156 result = []
157 _keywords = _keywords.copy()
158 len_text = len(split_text)
159 for i in xrange(len_text):
160 word = _strip_word(split_text[i])
161 if word in _keywords:
162 combined_word = [word]
163 if i + 1 == len_text:
164 result.append(word) # appends last word if keyword and doesn't iterate
165 for j in xrange(i + 1, len_text):
166 other_word = _strip_word(split_text[j])
167 if other_word in _keywords and other_word == split_text[j]:
168 combined_word.append(other_word)
169 else:
170 for keyword in combined_word:
171 _keywords.pop(keyword)
172 result.append(" ".join(combined_word))
173 break
174 return result
175
176
177 def _get_average_score(concept, _keywords):
178 word_list = concept.split()
179 word_counter = 0
180 total = 0
181 for word in word_list:
182 total += _keywords[word]
183 word_counter += 1
184 return total / word_counter
185
186
187 def _format_results(_keywords, combined_keywords, split, scores):
188 """
189 :param keywords:dict of keywords:scores
190 :param combined_keywords:list of word/s
191 """
192 combined_keywords.sort(key=lambda w: _get_average_score(w, _keywords), reverse=True)
193 if scores:
194 return [(word, _get_average_score(word, _keywords)) for word in combined_keywords]
195 if split:
196 return combined_keywords
197 return "\n".join(combined_keywords)
198
199
200 def keywords(text, ratio=0.2, words=None, split=False, scores=False, pos_filter=['NN', 'JJ'], lemmatize=False):
201 # Gets a dict of word -> lemma
202 text = to_unicode(text)
203 tokens = _clean_text_by_word(text)
204 split_text = list(_tokenize_by_word(text))
205
206 # Creates the graph and adds the edges
207 graph = _build_graph(_get_words_for_graph(tokens, pos_filter))
208 _set_graph_edges(graph, tokens, split_text)
209 del split_text # It's no longer used
210
211 _remove_unreachable_nodes(graph)
212
213 # Ranks the tokens using the PageRank algorithm. Returns dict of lemma -> score
214 pagerank_scores = _pagerank(graph)
215
216 extracted_lemmas = _extract_tokens(graph.nodes(), pagerank_scores, ratio, words)
217
218 # The results can be polluted by many variations of the same word
219 if lemmatize:
220 lemmas_to_word = {}
221 for word, unit in iteritems(tokens):
222 lemmas_to_word[unit.token] = [word]
223 else:
224 lemmas_to_word = _lemmas_to_words(tokens)
225
226 keywords = _get_keywords_with_score(extracted_lemmas, lemmas_to_word)
227
228 # text.split() to keep numbers and punctuation marks, so separeted concepts are not combined
229 combined_keywords = _get_combined_keywords(keywords, text.split())
230
231 return _format_results(keywords, combined_keywords, split, scores)
232
233
234 def get_graph(text):
235 tokens = _clean_text_by_word(text)
236 split_text = list(_tokenize_by_word(text))
237
238 graph = _build_graph(_get_words_for_graph(tokens))
239 _set_graph_edges(graph, tokens, split_text)
240
241 return graph
242
[end of gensim/summarization/keywords.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gensim/summarization/keywords.py b/gensim/summarization/keywords.py
--- a/gensim/summarization/keywords.py
+++ b/gensim/summarization/keywords.py
@@ -164,7 +164,7 @@
result.append(word) # appends last word if keyword and doesn't iterate
for j in xrange(i + 1, len_text):
other_word = _strip_word(split_text[j])
- if other_word in _keywords and other_word == split_text[j]:
+ if other_word in _keywords and other_word == split_text[j] and not other_word in combined_word:
combined_word.append(other_word)
else:
for keyword in combined_word:
| {"golden_diff": "diff --git a/gensim/summarization/keywords.py b/gensim/summarization/keywords.py\n--- a/gensim/summarization/keywords.py\n+++ b/gensim/summarization/keywords.py\n@@ -164,7 +164,7 @@\n result.append(word) # appends last word if keyword and doesn't iterate\n for j in xrange(i + 1, len_text):\n other_word = _strip_word(split_text[j])\n- if other_word in _keywords and other_word == split_text[j]:\n+ if other_word in _keywords and other_word == split_text[j] and not other_word in combined_word:\n combined_word.append(other_word)\n else:\n for keyword in combined_word:\n", "issue": "KeyError: Popping a non-existing value from `_keywords`\nOS X 10.11.1\nPython 2.7.10\ngensim 0.12.4\n\nTo reproduce this in Python shell:\n\n```\n>>> import gensim.summarization\n>>> t = \"Victor S. Sage Compare Sage 50c Editions Find accounting software that's right for your business Every product comes with anytime, anywhere online access; automatic updates; access to unlimited support; access to built-in credit card processing and payroll; and advanced reporting. Three solutions for your business 1 user From $249/year Buy now Free Trial 1-5 users From $299/year Buy now Free Trial 3-40 users From $1,199/year Buy now Free Trial Essential Accounting Accounts payable, accounts receivable, cash management check check check open check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check check Advanced Accounting Automated tasks, audit trail, budgeting, change order processing check check open check check check check check check check check check check check check check check check check check check check check check check check check check check check check In-depth Accounting Fast processing, industry-specific features, workflow management check open check check check check check check check check check check check Disclaimers open * This product is backed by a no-risk guarantee for first-time Sage 50 customers. If, within 60 days of purchase, you are not convinced that Sage 50 is the best accounting program for your business, we will refund your money (less and rebate you have received for this purchase). Dated proof of purchase and return of product is required. 
For details, call 877-481-0341.\"\n>>> import gensim.summarization\n>>> keywords = gensim.summarization.keywords(t, pos_filter=[], ratio=0.2, lemmatize=True, scores=True)\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"/usr/local/lib/python2.7/site-packages/gensim/summarization/keywords.py\", line 229, in keywords\n combined_keywords = _get_combined_keywords(keywords, text.split())\n File \"/usr/local/lib/python2.7/site-packages/gensim/summarization/keywords.py\", line 171, in _get_combined_keywords\n _keywords.pop(keyword)\nKeyError: u'check'\n```\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\nfrom gensim.summarization.pagerank_weighted import pagerank_weighted as _pagerank\nfrom gensim.summarization.textcleaner import clean_text_by_word as _clean_text_by_word\nfrom gensim.summarization.textcleaner import tokenize_by_word as _tokenize_by_word\nfrom gensim.summarization.commons import build_graph as _build_graph\nfrom gensim.summarization.commons import remove_unreachable_nodes as _remove_unreachable_nodes\nfrom gensim.utils import to_unicode\nfrom itertools import combinations as _combinations\nfrom six.moves.queue import Queue as _Queue\nfrom six.moves import xrange\nfrom six import iteritems\n\n\nWINDOW_SIZE = 2\n\n\"\"\"\nCheck tags in http://www.clips.ua.ac.be/pages/mbsp-tags and use only first two letters\nExample: filter for nouns and adjectives:\nINCLUDING_FILTER = ['NN', 'JJ']\n\"\"\"\nINCLUDING_FILTER = ['NN', 'JJ']\nEXCLUDING_FILTER = []\n\n\ndef _get_pos_filters():\n return frozenset(INCLUDING_FILTER), frozenset(EXCLUDING_FILTER)\n\n\ndef _get_words_for_graph(tokens, pos_filter):\n if pos_filter is None:\n include_filters, exclude_filters = _get_pos_filters()\n else:\n include_filters = set(pos_filter)\n exclude_filters = frozenset([])\n if include_filters and exclude_filters:\n raise ValueError(\"Can't use both include and exclude filters, should use only one\")\n\n result = []\n for word, unit in iteritems(tokens):\n if exclude_filters and unit.tag in exclude_filters:\n continue\n if (include_filters and unit.tag in include_filters) or not include_filters or not unit.tag:\n result.append(unit.token)\n return result\n\n\ndef _get_first_window(split_text):\n return split_text[:WINDOW_SIZE]\n\n\ndef _set_graph_edge(graph, tokens, word_a, word_b):\n if word_a in tokens and word_b in tokens:\n lemma_a = tokens[word_a].token\n lemma_b = tokens[word_b].token\n edge = (lemma_a, lemma_b)\n\n if graph.has_node(lemma_a) and graph.has_node(lemma_b) and not graph.has_edge(edge):\n graph.add_edge(edge)\n\n\ndef _process_first_window(graph, tokens, split_text):\n first_window = _get_first_window(split_text)\n for word_a, word_b in _combinations(first_window, 2):\n _set_graph_edge(graph, tokens, word_a, word_b)\n\n\ndef _init_queue(split_text):\n queue = _Queue()\n first_window = _get_first_window(split_text)\n for word in first_window[1:]:\n queue.put(word)\n return queue\n\n\ndef _process_word(graph, tokens, queue, word):\n for word_to_compare in _queue_iterator(queue):\n _set_graph_edge(graph, tokens, word, word_to_compare)\n\n\ndef _update_queue(queue, word):\n queue.get()\n queue.put(word)\n assert queue.qsize() == (WINDOW_SIZE - 1)\n\n\ndef _process_text(graph, tokens, split_text):\n queue = _init_queue(split_text)\n for i in xrange(WINDOW_SIZE, len(split_text)):\n word = split_text[i]\n _process_word(graph, tokens, queue, word)\n 
_update_queue(queue, word)\n\n\ndef _queue_iterator(queue):\n iterations = queue.qsize()\n for i in xrange(iterations):\n var = queue.get()\n yield var\n queue.put(var)\n\n\ndef _set_graph_edges(graph, tokens, split_text):\n _process_first_window(graph, tokens, split_text)\n _process_text(graph, tokens, split_text)\n\n\ndef _extract_tokens(lemmas, scores, ratio, words):\n lemmas.sort(key=lambda s: scores[s], reverse=True)\n\n # If no \"words\" option is selected, the number of sentences is\n # reduced by the provided ratio, else, the ratio is ignored.\n length = len(lemmas) * ratio if words is None else words\n return [(scores[lemmas[i]], lemmas[i],) for i in range(int(length))]\n\n\ndef _lemmas_to_words(tokens):\n lemma_to_word = {}\n for word, unit in iteritems(tokens):\n lemma = unit.token\n if lemma in lemma_to_word:\n lemma_to_word[lemma].append(word)\n else:\n lemma_to_word[lemma] = [word]\n return lemma_to_word\n\n\ndef _get_keywords_with_score(extracted_lemmas, lemma_to_word):\n \"\"\"\n :param extracted_lemmas:list of tuples\n :param lemma_to_word: dict of {lemma:list of words}\n :return: dict of {keyword:score}\n \"\"\"\n keywords = {}\n for score, lemma in extracted_lemmas:\n keyword_list = lemma_to_word[lemma]\n for keyword in keyword_list:\n keywords[keyword] = score\n return keywords\n\n\ndef _strip_word(word):\n stripped_word_list = list(_tokenize_by_word(word))\n return stripped_word_list[0] if stripped_word_list else \"\"\n\n\ndef _get_combined_keywords(_keywords, split_text):\n \"\"\"\n :param keywords:dict of keywords:scores\n :param split_text: list of strings\n :return: combined_keywords:list\n \"\"\"\n result = []\n _keywords = _keywords.copy()\n len_text = len(split_text)\n for i in xrange(len_text):\n word = _strip_word(split_text[i])\n if word in _keywords:\n combined_word = [word]\n if i + 1 == len_text:\n result.append(word) # appends last word if keyword and doesn't iterate\n for j in xrange(i + 1, len_text):\n other_word = _strip_word(split_text[j])\n if other_word in _keywords and other_word == split_text[j]:\n combined_word.append(other_word)\n else:\n for keyword in combined_word:\n _keywords.pop(keyword)\n result.append(\" \".join(combined_word))\n break\n return result\n\n\ndef _get_average_score(concept, _keywords):\n word_list = concept.split()\n word_counter = 0\n total = 0\n for word in word_list:\n total += _keywords[word]\n word_counter += 1\n return total / word_counter\n\n\ndef _format_results(_keywords, combined_keywords, split, scores):\n \"\"\"\n :param keywords:dict of keywords:scores\n :param combined_keywords:list of word/s\n \"\"\"\n combined_keywords.sort(key=lambda w: _get_average_score(w, _keywords), reverse=True)\n if scores:\n return [(word, _get_average_score(word, _keywords)) for word in combined_keywords]\n if split:\n return combined_keywords\n return \"\\n\".join(combined_keywords)\n\n\ndef keywords(text, ratio=0.2, words=None, split=False, scores=False, pos_filter=['NN', 'JJ'], lemmatize=False):\n # Gets a dict of word -> lemma\n text = to_unicode(text)\n tokens = _clean_text_by_word(text)\n split_text = list(_tokenize_by_word(text))\n\n # Creates the graph and adds the edges\n graph = _build_graph(_get_words_for_graph(tokens, pos_filter))\n _set_graph_edges(graph, tokens, split_text)\n del split_text # It's no longer used\n\n _remove_unreachable_nodes(graph)\n\n # Ranks the tokens using the PageRank algorithm. 
Returns dict of lemma -> score\n pagerank_scores = _pagerank(graph)\n\n extracted_lemmas = _extract_tokens(graph.nodes(), pagerank_scores, ratio, words)\n\n # The results can be polluted by many variations of the same word\n if lemmatize:\n lemmas_to_word = {}\n for word, unit in iteritems(tokens):\n lemmas_to_word[unit.token] = [word]\n else:\n lemmas_to_word = _lemmas_to_words(tokens)\n\n keywords = _get_keywords_with_score(extracted_lemmas, lemmas_to_word)\n\n # text.split() to keep numbers and punctuation marks, so separeted concepts are not combined\n combined_keywords = _get_combined_keywords(keywords, text.split())\n\n return _format_results(keywords, combined_keywords, split, scores)\n\n\ndef get_graph(text):\n tokens = _clean_text_by_word(text)\n split_text = list(_tokenize_by_word(text))\n\n graph = _build_graph(_get_words_for_graph(tokens))\n _set_graph_edges(graph, tokens, split_text)\n\n return graph\n", "path": "gensim/summarization/keywords.py"}]} | 3,594 | 165 |
gh_patches_debug_14978 | rasdani/github-patches | git_diff | archlinux__archinstall-2241 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Solved] broken initramfs, black screen
Broken initramfs after a clean install with encryption, KDE desktop and proprietary Nvidia drivers.
1. GRUB loads
2. The drive is decrypted
3. The initramfs loads
4. Black screen with no output
</issue>
<code>
[start of archinstall/lib/hardware.py]
1 import os
2 from enum import Enum
3 from functools import cached_property
4 from pathlib import Path
5 from typing import Optional, Dict, List
6
7 from .exceptions import SysCallError
8 from .general import SysCommand
9 from .networking import list_interfaces, enrich_iface_types
10 from .output import debug
11
12
13 class CpuVendor(Enum):
14 AuthenticAMD = 'amd'
15 GenuineIntel = 'intel'
16 _Unknown = 'unknown'
17
18 @classmethod
19 def get_vendor(cls, name: str) -> 'CpuVendor':
20 if vendor := getattr(cls, name, None):
21 return vendor
22 else:
23 debug(f"Unknown CPU vendor '{name}' detected.")
24 return cls._Unknown
25
26 def _has_microcode(self) -> bool:
27 match self:
28 case CpuVendor.AuthenticAMD | CpuVendor.GenuineIntel:
29 return True
30 case _:
31 return False
32
33 def get_ucode(self) -> Optional[Path]:
34 if self._has_microcode():
35 return Path(self.value + '-ucode.img')
36 return None
37
38
39 class GfxPackage(Enum):
40 IntelMediaDriver = 'intel-media-driver'
41 LibvaIntelDriver = 'libva-intel-driver'
42 LibvaMesaDriver = 'libva-mesa-driver'
43 Mesa = "mesa"
44 Nvidia = 'nvidia'
45 NvidiaOpen = 'nvidia-open'
46 VulkanIntel = 'vulkan-intel'
47 VulkanRadeon = 'vulkan-radeon'
48 Xf86VideoAmdgpu = "xf86-video-amdgpu"
49 Xf86VideoAti = "xf86-video-ati"
50 Xf86VideoNouveau = 'xf86-video-nouveau'
51 Xf86VideoVmware = 'xf86-video-vmware'
52
53
54 class GfxDriver(Enum):
55 AllOpenSource = 'All open-source'
56 AmdOpenSource = 'AMD / ATI (open-source)'
57 IntelOpenSource = 'Intel (open-source)'
58 NvidiaOpenKernel = 'Nvidia (open kernel module for newer GPUs, Turing+)'
59 NvidiaOpenSource = 'Nvidia (open-source nouveau driver)'
60 NvidiaProprietary = 'Nvidia (proprietary)'
61 VMOpenSource = 'VMware / VirtualBox (open-source)'
62
63 def is_nvidia(self) -> bool:
64 match self:
65 case GfxDriver.NvidiaProprietary | \
66 GfxDriver.NvidiaOpenSource | \
67 GfxDriver.NvidiaOpenKernel:
68 return True
69 case _:
70 return False
71
72 def packages(self) -> List[GfxPackage]:
73 match self:
74 case GfxDriver.AllOpenSource:
75 return [
76 GfxPackage.Mesa,
77 GfxPackage.Xf86VideoAmdgpu,
78 GfxPackage.Xf86VideoAti,
79 GfxPackage.Xf86VideoNouveau,
80 GfxPackage.Xf86VideoVmware,
81 GfxPackage.LibvaMesaDriver,
82 GfxPackage.LibvaIntelDriver,
83 GfxPackage.IntelMediaDriver,
84 GfxPackage.VulkanRadeon,
85 GfxPackage.VulkanIntel
86 ]
87 case GfxDriver.AmdOpenSource:
88 return [
89 GfxPackage.Mesa,
90 GfxPackage.Xf86VideoAmdgpu,
91 GfxPackage.Xf86VideoAti,
92 GfxPackage.LibvaMesaDriver,
93 GfxPackage.VulkanRadeon
94 ]
95 case GfxDriver.IntelOpenSource:
96 return [
97 GfxPackage.Mesa,
98 GfxPackage.LibvaIntelDriver,
99 GfxPackage.IntelMediaDriver,
100 GfxPackage.VulkanIntel
101 ]
102 case GfxDriver.NvidiaOpenKernel:
103 return [GfxPackage.NvidiaOpen]
104 case GfxDriver.NvidiaOpenSource:
105 return [
106 GfxPackage.Mesa,
107 GfxPackage.Xf86VideoNouveau,
108 GfxPackage.LibvaMesaDriver
109 ]
110 case GfxDriver.NvidiaProprietary:
111 return [GfxPackage.Nvidia]
112 case GfxDriver.VMOpenSource:
113 return [
114 GfxPackage.Mesa,
115 GfxPackage.Xf86VideoVmware
116 ]
117
118
119 class _SysInfo:
120 def __init__(self):
121 pass
122
123 @cached_property
124 def cpu_info(self) -> Dict[str, str]:
125 """
126 Returns system cpu information
127 """
128 cpu_info_path = Path("/proc/cpuinfo")
129 cpu: Dict[str, str] = {}
130
131 with cpu_info_path.open() as file:
132 for line in file:
133 if line := line.strip():
134 key, value = line.split(":", maxsplit=1)
135 cpu[key.strip()] = value.strip()
136
137 return cpu
138
139 @cached_property
140 def mem_info(self) -> Dict[str, int]:
141 """
142 Returns system memory information
143 """
144 mem_info_path = Path("/proc/meminfo")
145 mem_info: Dict[str, int] = {}
146
147 with mem_info_path.open() as file:
148 for line in file:
149 key, value = line.strip().split(':')
150 num = value.split()[0]
151 mem_info[key] = int(num)
152
153 return mem_info
154
155 def mem_info_by_key(self, key: str) -> int:
156 return self.mem_info[key]
157
158 @cached_property
159 def loaded_modules(self) -> List[str]:
160 """
161 Returns loaded kernel modules
162 """
163 modules_path = Path('/proc/modules')
164 modules: List[str] = []
165
166 with modules_path.open() as file:
167 for line in file:
168 module = line.split(maxsplit=1)[0]
169 modules.append(module)
170
171 return modules
172
173
174 _sys_info = _SysInfo()
175
176
177 class SysInfo:
178 @staticmethod
179 def has_wifi() -> bool:
180 ifaces = list(list_interfaces().values())
181 return 'WIRELESS' in enrich_iface_types(ifaces).values()
182
183 @staticmethod
184 def has_uefi() -> bool:
185 return os.path.isdir('/sys/firmware/efi')
186
187 @staticmethod
188 def _graphics_devices() -> Dict[str, str]:
189 cards: Dict[str, str] = {}
190 for line in SysCommand("lspci"):
191 if b' VGA ' in line or b' 3D ' in line:
192 _, identifier = line.split(b': ', 1)
193 cards[identifier.strip().decode('UTF-8')] = str(line)
194 return cards
195
196 @staticmethod
197 def has_nvidia_graphics() -> bool:
198 return any('nvidia' in x.lower() for x in SysInfo._graphics_devices())
199
200 @staticmethod
201 def has_amd_graphics() -> bool:
202 return any('amd' in x.lower() for x in SysInfo._graphics_devices())
203
204 @staticmethod
205 def has_intel_graphics() -> bool:
206 return any('intel' in x.lower() for x in SysInfo._graphics_devices())
207
208 @staticmethod
209 def cpu_vendor() -> Optional[CpuVendor]:
210 if vendor := _sys_info.cpu_info.get('vendor_id'):
211 return CpuVendor.get_vendor(vendor)
212 return None
213
214 @staticmethod
215 def cpu_model() -> Optional[str]:
216 return _sys_info.cpu_info.get('model name', None)
217
218 @staticmethod
219 def sys_vendor() -> str:
220 with open(f"/sys/devices/virtual/dmi/id/sys_vendor") as vendor:
221 return vendor.read().strip()
222
223 @staticmethod
224 def product_name() -> str:
225 with open(f"/sys/devices/virtual/dmi/id/product_name") as product:
226 return product.read().strip()
227
228 @staticmethod
229 def mem_available() -> int:
230 return _sys_info.mem_info_by_key('MemAvailable')
231
232 @staticmethod
233 def mem_free() -> int:
234 return _sys_info.mem_info_by_key('MemFree')
235
236 @staticmethod
237 def mem_total() -> int:
238 return _sys_info.mem_info_by_key('MemTotal')
239
240 @staticmethod
241 def virtualization() -> Optional[str]:
242 try:
243 return str(SysCommand("systemd-detect-virt")).strip('\r\n')
244 except SysCallError as err:
245 debug(f"Could not detect virtual system: {err}")
246
247 return None
248
249 @staticmethod
250 def is_vm() -> bool:
251 try:
252 result = SysCommand("systemd-detect-virt")
253 return b"none" not in b"".join(result).lower()
254 except SysCallError as err:
255 debug(f"System is not running in a VM: {err}")
256
257 return False
258
259 @staticmethod
260 def requires_sof_fw() -> bool:
261 return 'snd_sof' in _sys_info.loaded_modules
262
263 @staticmethod
264 def requires_alsa_fw() -> bool:
265 modules = (
266 'snd_asihpi',
267 'snd_cs46xx',
268 'snd_darla20',
269 'snd_darla24',
270 'snd_echo3g',
271 'snd_emu10k1',
272 'snd_gina20',
273 'snd_gina24',
274 'snd_hda_codec_ca0132',
275 'snd_hdsp',
276 'snd_indigo',
277 'snd_indigodj',
278 'snd_indigodjx',
279 'snd_indigoio',
280 'snd_indigoiox',
281 'snd_layla20',
282 'snd_layla24',
283 'snd_mia',
284 'snd_mixart',
285 'snd_mona',
286 'snd_pcxhr',
287 'snd_vx_lib'
288 )
289
290 for loaded_module in _sys_info.loaded_modules:
291 if loaded_module in modules:
292 return True
293
294 return False
295
[end of archinstall/lib/hardware.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/archinstall/lib/hardware.py b/archinstall/lib/hardware.py
--- a/archinstall/lib/hardware.py
+++ b/archinstall/lib/hardware.py
@@ -42,6 +42,7 @@
LibvaMesaDriver = 'libva-mesa-driver'
Mesa = "mesa"
Nvidia = 'nvidia'
+ NvidiaDKMS = 'nvidia-dkms'
NvidiaOpen = 'nvidia-open'
VulkanIntel = 'vulkan-intel'
VulkanRadeon = 'vulkan-radeon'
@@ -108,7 +109,10 @@
GfxPackage.LibvaMesaDriver
]
case GfxDriver.NvidiaProprietary:
- return [GfxPackage.Nvidia]
+ return [
+ GfxPackage.Nvidia,
+ GfxPackage.NvidiaDKMS
+ ]
case GfxDriver.VMOpenSource:
return [
GfxPackage.Mesa,
| {"golden_diff": "diff --git a/archinstall/lib/hardware.py b/archinstall/lib/hardware.py\n--- a/archinstall/lib/hardware.py\n+++ b/archinstall/lib/hardware.py\n@@ -42,6 +42,7 @@\n \tLibvaMesaDriver = 'libva-mesa-driver'\n \tMesa = \"mesa\"\n \tNvidia = 'nvidia'\n+\tNvidiaDKMS = 'nvidia-dkms'\n \tNvidiaOpen = 'nvidia-open'\n \tVulkanIntel = 'vulkan-intel'\n \tVulkanRadeon = 'vulkan-radeon'\n@@ -108,7 +109,10 @@\n \t\t\t\t\tGfxPackage.LibvaMesaDriver\n \t\t\t\t]\n \t\t\tcase GfxDriver.NvidiaProprietary:\n-\t\t\t\treturn [GfxPackage.Nvidia]\n+\t\t\t\treturn [\n+\t\t\t\t\tGfxPackage.Nvidia,\n+\t\t\t\t\tGfxPackage.NvidiaDKMS\n+\t\t\t\t]\n \t\t\tcase GfxDriver.VMOpenSource:\n \t\t\t\treturn [\n \t\t\t\t\tGfxPackage.Mesa,\n", "issue": "[Solved] broken intramfs, black screen\nBroken initramfs after clean install with encryption, kde desktop and propretary nvidia drivers.\r\n1. Loads grub\r\n2. Decrypt drive\r\n3. Loads initramfs\r\n4. Black screen with no output\n", "before_files": [{"content": "import os\nfrom enum import Enum\nfrom functools import cached_property\nfrom pathlib import Path\nfrom typing import Optional, Dict, List\n\nfrom .exceptions import SysCallError\nfrom .general import SysCommand\nfrom .networking import list_interfaces, enrich_iface_types\nfrom .output import debug\n\n\nclass CpuVendor(Enum):\n\tAuthenticAMD = 'amd'\n\tGenuineIntel = 'intel'\n\t_Unknown = 'unknown'\n\n\t@classmethod\n\tdef get_vendor(cls, name: str) -> 'CpuVendor':\n\t\tif vendor := getattr(cls, name, None):\n\t\t\treturn vendor\n\t\telse:\n\t\t\tdebug(f\"Unknown CPU vendor '{name}' detected.\")\n\t\t\treturn cls._Unknown\n\n\tdef _has_microcode(self) -> bool:\n\t\tmatch self:\n\t\t\tcase CpuVendor.AuthenticAMD | CpuVendor.GenuineIntel:\n\t\t\t\treturn True\n\t\t\tcase _:\n\t\t\t\treturn False\n\n\tdef get_ucode(self) -> Optional[Path]:\n\t\tif self._has_microcode():\n\t\t\treturn Path(self.value + '-ucode.img')\n\t\treturn None\n\n\nclass GfxPackage(Enum):\n\tIntelMediaDriver = 'intel-media-driver'\n\tLibvaIntelDriver = 'libva-intel-driver'\n\tLibvaMesaDriver = 'libva-mesa-driver'\n\tMesa = \"mesa\"\n\tNvidia = 'nvidia'\n\tNvidiaOpen = 'nvidia-open'\n\tVulkanIntel = 'vulkan-intel'\n\tVulkanRadeon = 'vulkan-radeon'\n\tXf86VideoAmdgpu = \"xf86-video-amdgpu\"\n\tXf86VideoAti = \"xf86-video-ati\"\n\tXf86VideoNouveau = 'xf86-video-nouveau'\n\tXf86VideoVmware = 'xf86-video-vmware'\n\n\nclass GfxDriver(Enum):\n\tAllOpenSource = 'All open-source'\n\tAmdOpenSource = 'AMD / ATI (open-source)'\n\tIntelOpenSource = 'Intel (open-source)'\n\tNvidiaOpenKernel = 'Nvidia (open kernel module for newer GPUs, Turing+)'\n\tNvidiaOpenSource = 'Nvidia (open-source nouveau driver)'\n\tNvidiaProprietary = 'Nvidia (proprietary)'\n\tVMOpenSource = 'VMware / VirtualBox (open-source)'\n\n\tdef is_nvidia(self) -> bool:\n\t\tmatch self:\n\t\t\tcase GfxDriver.NvidiaProprietary | \\\n\t\t\t\tGfxDriver.NvidiaOpenSource | \\\n\t\t\t\tGfxDriver.NvidiaOpenKernel:\n\t\t\t\treturn True\n\t\t\tcase _:\n\t\t\t\treturn False\n\n\tdef packages(self) -> List[GfxPackage]:\n\t\tmatch self:\n\t\t\tcase GfxDriver.AllOpenSource:\n\t\t\t\treturn [\n\t\t\t\t\tGfxPackage.Mesa,\n\t\t\t\t\tGfxPackage.Xf86VideoAmdgpu,\n\t\t\t\t\tGfxPackage.Xf86VideoAti,\n\t\t\t\t\tGfxPackage.Xf86VideoNouveau,\n\t\t\t\t\tGfxPackage.Xf86VideoVmware,\n\t\t\t\t\tGfxPackage.LibvaMesaDriver,\n\t\t\t\t\tGfxPackage.LibvaIntelDriver,\n\t\t\t\t\tGfxPackage.IntelMediaDriver,\n\t\t\t\t\tGfxPackage.VulkanRadeon,\n\t\t\t\t\tGfxPackage.VulkanIntel\n\t\t\t\t]\n\t\t\tcase 
GfxDriver.AmdOpenSource:\n\t\t\t\treturn [\n\t\t\t\t\tGfxPackage.Mesa,\n\t\t\t\t\tGfxPackage.Xf86VideoAmdgpu,\n\t\t\t\t\tGfxPackage.Xf86VideoAti,\n\t\t\t\t\tGfxPackage.LibvaMesaDriver,\n\t\t\t\t\tGfxPackage.VulkanRadeon\n\t\t\t\t]\n\t\t\tcase GfxDriver.IntelOpenSource:\n\t\t\t\treturn [\n\t\t\t\t\tGfxPackage.Mesa,\n\t\t\t\t\tGfxPackage.LibvaIntelDriver,\n\t\t\t\t\tGfxPackage.IntelMediaDriver,\n\t\t\t\t\tGfxPackage.VulkanIntel\n\t\t\t\t]\n\t\t\tcase GfxDriver.NvidiaOpenKernel:\n\t\t\t\treturn [GfxPackage.NvidiaOpen]\n\t\t\tcase GfxDriver.NvidiaOpenSource:\n\t\t\t\treturn [\n\t\t\t\t\tGfxPackage.Mesa,\n\t\t\t\t\tGfxPackage.Xf86VideoNouveau,\n\t\t\t\t\tGfxPackage.LibvaMesaDriver\n\t\t\t\t]\n\t\t\tcase GfxDriver.NvidiaProprietary:\n\t\t\t\treturn [GfxPackage.Nvidia]\n\t\t\tcase GfxDriver.VMOpenSource:\n\t\t\t\treturn [\n\t\t\t\t\tGfxPackage.Mesa,\n\t\t\t\t\tGfxPackage.Xf86VideoVmware\n\t\t\t\t]\n\n\nclass _SysInfo:\n\tdef __init__(self):\n\t\tpass\n\n\t@cached_property\n\tdef cpu_info(self) -> Dict[str, str]:\n\t\t\"\"\"\n\t\tReturns system cpu information\n\t\t\"\"\"\n\t\tcpu_info_path = Path(\"/proc/cpuinfo\")\n\t\tcpu: Dict[str, str] = {}\n\n\t\twith cpu_info_path.open() as file:\n\t\t\tfor line in file:\n\t\t\t\tif line := line.strip():\n\t\t\t\t\tkey, value = line.split(\":\", maxsplit=1)\n\t\t\t\t\tcpu[key.strip()] = value.strip()\n\n\t\treturn cpu\n\n\t@cached_property\n\tdef mem_info(self) -> Dict[str, int]:\n\t\t\"\"\"\n\t\tReturns system memory information\n\t\t\"\"\"\n\t\tmem_info_path = Path(\"/proc/meminfo\")\n\t\tmem_info: Dict[str, int] = {}\n\n\t\twith mem_info_path.open() as file:\n\t\t\tfor line in file:\n\t\t\t\tkey, value = line.strip().split(':')\n\t\t\t\tnum = value.split()[0]\n\t\t\t\tmem_info[key] = int(num)\n\n\t\treturn mem_info\n\n\tdef mem_info_by_key(self, key: str) -> int:\n\t\treturn self.mem_info[key]\n\n\t@cached_property\n\tdef loaded_modules(self) -> List[str]:\n\t\t\"\"\"\n\t\tReturns loaded kernel modules\n\t\t\"\"\"\n\t\tmodules_path = Path('/proc/modules')\n\t\tmodules: List[str] = []\n\n\t\twith modules_path.open() as file:\n\t\t\tfor line in file:\n\t\t\t\tmodule = line.split(maxsplit=1)[0]\n\t\t\t\tmodules.append(module)\n\n\t\treturn modules\n\n\n_sys_info = _SysInfo()\n\n\nclass SysInfo:\n\t@staticmethod\n\tdef has_wifi() -> bool:\n\t\tifaces = list(list_interfaces().values())\n\t\treturn 'WIRELESS' in enrich_iface_types(ifaces).values()\n\n\t@staticmethod\n\tdef has_uefi() -> bool:\n\t\treturn os.path.isdir('/sys/firmware/efi')\n\n\t@staticmethod\n\tdef _graphics_devices() -> Dict[str, str]:\n\t\tcards: Dict[str, str] = {}\n\t\tfor line in SysCommand(\"lspci\"):\n\t\t\tif b' VGA ' in line or b' 3D ' in line:\n\t\t\t\t_, identifier = line.split(b': ', 1)\n\t\t\t\tcards[identifier.strip().decode('UTF-8')] = str(line)\n\t\treturn cards\n\n\t@staticmethod\n\tdef has_nvidia_graphics() -> bool:\n\t\treturn any('nvidia' in x.lower() for x in SysInfo._graphics_devices())\n\n\t@staticmethod\n\tdef has_amd_graphics() -> bool:\n\t\treturn any('amd' in x.lower() for x in SysInfo._graphics_devices())\n\n\t@staticmethod\n\tdef has_intel_graphics() -> bool:\n\t\treturn any('intel' in x.lower() for x in SysInfo._graphics_devices())\n\n\t@staticmethod\n\tdef cpu_vendor() -> Optional[CpuVendor]:\n\t\tif vendor := _sys_info.cpu_info.get('vendor_id'):\n\t\t\treturn CpuVendor.get_vendor(vendor)\n\t\treturn None\n\n\t@staticmethod\n\tdef cpu_model() -> Optional[str]:\n\t\treturn _sys_info.cpu_info.get('model name', None)\n\n\t@staticmethod\n\tdef sys_vendor() -> 
str:\n\t\twith open(f\"/sys/devices/virtual/dmi/id/sys_vendor\") as vendor:\n\t\t\treturn vendor.read().strip()\n\n\t@staticmethod\n\tdef product_name() -> str:\n\t\twith open(f\"/sys/devices/virtual/dmi/id/product_name\") as product:\n\t\t\treturn product.read().strip()\n\n\t@staticmethod\n\tdef mem_available() -> int:\n\t\treturn _sys_info.mem_info_by_key('MemAvailable')\n\n\t@staticmethod\n\tdef mem_free() -> int:\n\t\treturn _sys_info.mem_info_by_key('MemFree')\n\n\t@staticmethod\n\tdef mem_total() -> int:\n\t\treturn _sys_info.mem_info_by_key('MemTotal')\n\n\t@staticmethod\n\tdef virtualization() -> Optional[str]:\n\t\ttry:\n\t\t\treturn str(SysCommand(\"systemd-detect-virt\")).strip('\\r\\n')\n\t\texcept SysCallError as err:\n\t\t\tdebug(f\"Could not detect virtual system: {err}\")\n\n\t\treturn None\n\n\t@staticmethod\n\tdef is_vm() -> bool:\n\t\ttry:\n\t\t\tresult = SysCommand(\"systemd-detect-virt\")\n\t\t\treturn b\"none\" not in b\"\".join(result).lower()\n\t\texcept SysCallError as err:\n\t\t\tdebug(f\"System is not running in a VM: {err}\")\n\n\t\treturn False\n\n\t@staticmethod\n\tdef requires_sof_fw() -> bool:\n\t\treturn 'snd_sof' in _sys_info.loaded_modules\n\n\t@staticmethod\n\tdef requires_alsa_fw() -> bool:\n\t\tmodules = (\n\t\t\t'snd_asihpi',\n\t\t\t'snd_cs46xx',\n\t\t\t'snd_darla20',\n\t\t\t'snd_darla24',\n\t\t\t'snd_echo3g',\n\t\t\t'snd_emu10k1',\n\t\t\t'snd_gina20',\n\t\t\t'snd_gina24',\n\t\t\t'snd_hda_codec_ca0132',\n\t\t\t'snd_hdsp',\n\t\t\t'snd_indigo',\n\t\t\t'snd_indigodj',\n\t\t\t'snd_indigodjx',\n\t\t\t'snd_indigoio',\n\t\t\t'snd_indigoiox',\n\t\t\t'snd_layla20',\n\t\t\t'snd_layla24',\n\t\t\t'snd_mia',\n\t\t\t'snd_mixart',\n\t\t\t'snd_mona',\n\t\t\t'snd_pcxhr',\n\t\t\t'snd_vx_lib'\n\t\t)\n\n\t\tfor loaded_module in _sys_info.loaded_modules:\n\t\t\tif loaded_module in modules:\n\t\t\t\treturn True\n\n\t\treturn False\n", "path": "archinstall/lib/hardware.py"}]} | 3,573 | 220 |
gh_patches_debug_39439 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-474 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Development server not starting when GVZ API not reachable
### Describe the Bug
<!-- A clear and concise description of what the bug is. -->
When the [GVZ API](http://gvz.integreat-app.de/api/search/expect_empty_json) is reachable but does not answer, the development server hangs during startup, because there is no timeout defined for the API request [here](https://github.com/Integreat/cms-django/blob/f4f4c698575e975655b1333f630b5f0d6e4e034f/src/gvz_api/apps.py#L27).
### Steps to Reproduce
1. Execute `./dev-tools/run.sh` when GVZ API is reachable but does not answer (tested this morning)
2. Wait
### Expected Behavior
<!-- A clear and concise description of what you expected to happen. -->
The server should start and print a log message that the GVZ API is not available.
### Actual Behavior
<!-- A clear and concise description of what actually happened. -->
The server does not start
### Additional Information
<!-- Add any other context (e.g. logs, screenshots, etc.) about the problem here. -->
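
As an illustration of the missing-timeout point (a sketch only, not the committed fix — the endpoint is taken from the link above, and the 3-second value is an arbitrary example), bounding the request would let startup continue even when the API accepts the connection but never answers:

```
import requests

GVZ_API_URL = "http://gvz.integreat-app.de/api"  # assumption: same base URL as settings.GVZ_API_URL

def gvz_api_available(timeout_seconds=3):
    """Return True only if the GVZ API answers with valid JSON within the timeout."""
    try:
        response = requests.get(
            f"{GVZ_API_URL}/search/expect_empty_json", timeout=timeout_seconds
        )
        response.json()
    except (requests.exceptions.RequestException, ValueError):
        # Covers connection errors, timeouts and non-JSON answers alike.
        return False
    return True
```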
</issue>
<code>
[start of src/gvz_api/apps.py]
1 """
2 Configuration of GVZ API app
3 """
4 import logging
5 import json
6 import requests
7 from django.apps import AppConfig
8 from django.conf import settings
9
10 logger = logging.getLogger(__name__)
11
12
13 class GvzApiConfig(AppConfig):
14 """
15 GVZ API config inheriting the django AppConfig
16 """
17
18 name = "gvz_api"
19 api_available = False
20
21 def ready(self):
22 """
23 Checking if API is available
24 """
25 if settings.GVZ_API_ENABLED:
26 try:
27 response = requests.get(
28 f"{settings.GVZ_API_URL}/search/expect_empty_json"
29 )
30 json.loads(response.text)
31 except json.decoder.JSONDecodeError:
32 self.api_available = False
33 except requests.exceptions.RequestException:
34 self.api_available = False
35 else:
36 self.api_available = True
37 else:
38 self.api_available = False
39 if not self.api_available:
40 logger.info(
41 "GVZ API is not available. You won't be able to "
42 "automatically import coordinates and region aliases."
43 )
44 else:
45 self.api_available = True
46 logger.info("GVZ API is available.")
47
[end of src/gvz_api/apps.py]
[start of src/backend/settings.py]
1 """
2 Django settings for backend project.
3
4 For more information on this file, see :doc:`topics/settings`.
5 For the full list of settings and their values, see :doc:`ref/settings`.
6 """
7 import os
8
9 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
10 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
11
12
13 # Version number
14 VERSION = "0.0.14"
15
16 # Quick-start development settings - unsuitable for production
17 # See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
18
19 # SECURITY WARNING: keep the secret key used in production secret!
20 SECRET_KEY = "-!v282$zj815_q@htaxcubylo)(l%a+k*-xi78hw*#s2@i86@_"
21
22 # SECURITY WARNING: don't run with debug turned on in production!
23 DEBUG = True
24
25 ALLOWED_HOSTS = ["localhost", "127.0.0.1", "0.0.0.0"]
26
27 # Needed for webauthn (this is a setting in case the application runs behind a proxy)
28 HOSTNAME = "localhost"
29 BASE_URL = "http://localhost:8000"
30
31 # Application definition
32
33 INSTALLED_APPS = [
34 "cms.apps.CmsConfig",
35 "gvz_api.apps.GvzApiConfig",
36 "django.contrib.admin",
37 "django.contrib.auth",
38 "django.contrib.contenttypes",
39 "django.contrib.messages",
40 "django.contrib.sessions",
41 "django.contrib.staticfiles",
42 "compressor",
43 "compressor_toolkit",
44 "corsheaders",
45 "widget_tweaks",
46 "easy_thumbnails",
47 "filer",
48 "mptt",
49 "rules.apps.AutodiscoverRulesConfig",
50 ]
51
52 MIDDLEWARE = [
53 "corsheaders.middleware.CorsMiddleware",
54 "django.middleware.security.SecurityMiddleware",
55 "django.contrib.sessions.middleware.SessionMiddleware",
56 "django.middleware.locale.LocaleMiddleware",
57 "django.middleware.common.CommonMiddleware",
58 "django.middleware.csrf.CsrfViewMiddleware",
59 "django.contrib.auth.middleware.AuthenticationMiddleware",
60 "django.contrib.messages.middleware.MessageMiddleware",
61 "django.middleware.clickjacking.XFrameOptionsMiddleware",
62 ]
63
64 ROOT_URLCONF = "backend.urls"
65 THUMBNAIL_HIGH_RESOLUTION = True
66
67 TEMPLATES = [
68 {
69 "BACKEND": "django.template.backends.django.DjangoTemplates",
70 "DIRS": [],
71 "APP_DIRS": True,
72 "OPTIONS": {
73 "context_processors": [
74 "django.template.context_processors.debug",
75 "django.template.context_processors.request",
76 "django.contrib.auth.context_processors.auth",
77 "django.contrib.messages.context_processors.messages",
78 "backend.context_processors.region_slug_processor",
79 ],
80 "debug": DEBUG,
81 },
82 },
83 ]
84
85 WSGI_APPLICATION = "backend.wsgi.application"
86
87
88 # Database
89 # https://docs.djangoproject.com/en/2.2/ref/settings/#databases
90
91 DATABASES = {
92 "default": {
93 "ENGINE": "django.db.backends.postgresql_psycopg2",
94 "NAME": "integreat",
95 "USER": "integreat",
96 "PASSWORD": "password",
97 "HOST": "localhost",
98 "PORT": "5432",
99 }
100 }
101
102 # Directory for initial database contents
103
104 FIXTURE_DIRS = (os.path.join(BASE_DIR, "cms/fixtures/"),)
105
106 # Authentication backends
107
108 AUTHENTICATION_BACKENDS = (
109 "rules.permissions.ObjectPermissionBackend",
110 "django.contrib.auth.backends.ModelBackend", # this is default
111 )
112
113
114 # Password validation
115 # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
116
117 AUTH_PASSWORD_VALIDATORS = [
118 {
119 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
120 },
121 {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
122 {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
123 {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
124 ]
125
126
127 # Internationalization
128 # https://docs.djangoproject.com/en/2.2/topics/i18n/
129
130 LANGUAGES = (
131 ("en-us", "English"),
132 ("de-de", "Deutsch"),
133 )
134
135 LOCALE_PATHS = (os.path.join(BASE_DIR, "locale"),)
136
137 LANGUAGE_CODE = "de-de"
138
139 TIME_ZONE = "UTC"
140
141 USE_I18N = True
142
143 USE_L10N = True
144
145 USE_TZ = True
146
147
148 # Static files (CSS, JavaScript, Images)
149 # https://docs.djangoproject.com/en/2.2/howto/static-files/
150
151 STATICFILES_DIRS = [
152 os.path.join(BASE_DIR, "../node_modules"),
153 ]
154 STATIC_URL = "/static/"
155 STATIC_ROOT = os.path.join(BASE_DIR, "cms/static/")
156
157 # Login
158 LOGIN_URL = "/login"
159 LOGIN_REDIRECT_URL = "/"
160 LOGOUT_REDIRECT_URL = "/login"
161
162 # Miscellaneous
163 EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
164 CSRF_FAILURE_VIEW = "cms.views.error_handler.csrf_failure"
165
166 MEDIA_URL = "/media/"
167 MEDIA_ROOT = os.path.join(BASE_DIR, "media")
168 FILER_CANONICAL_URL = "media/"
169
170 LOGGING = {
171 "version": 1,
172 "disable_existing_loggers": False,
173 "formatters": {
174 "default": {"format": "INTEGREAT CMS - %(levelname)s: %(message)s",},
175 "console": {
176 "format": "%(asctime)s INTEGREAT CMS - %(levelname)s: %(message)s",
177 "datefmt": "%b %d %H:%M:%S",
178 },
179 },
180 "handlers": {
181 "console": {"class": "logging.StreamHandler", "formatter": "console"},
182 "authlog": {
183 "level": "INFO",
184 "class": "logging.handlers.SysLogHandler",
185 "address": "/dev/log",
186 "facility": "auth",
187 "formatter": "default",
188 },
189 "syslog": {
190 "level": "INFO",
191 "class": "logging.handlers.SysLogHandler",
192 "address": "/dev/log",
193 "facility": "syslog",
194 "formatter": "default",
195 },
196 },
197 "loggers": {
198 "django": {"handlers": ["console"], "level": "WARN", "propagate": True,},
199 "api": {"handlers": ["console"], "level": "INFO", "propagate": True,},
200 "cms": {"handlers": ["console"], "level": "INFO", "propagate": True,},
201 "rules": {"handlers": ["console"], "level": "DEBUG", "propagate": True,},
202 "auth": {"handlers": ["console", "authlog", "syslog"], "level": "INFO",},
203 },
204 }
205
206 STATICFILES_FINDERS = (
207 "django.contrib.staticfiles.finders.FileSystemFinder",
208 "django.contrib.staticfiles.finders.AppDirectoriesFinder",
209 "compressor.finders.CompressorFinder",
210 )
211
212 COMPRESS_CSS_FILTERS = [
213 "compressor.filters.css_default.CssAbsoluteFilter",
214 "compressor.filters.cssmin.CSSMinFilter",
215 "compressor.filters.template.TemplateFilter",
216 ]
217 COMPRESS_JS_FILTERS = [
218 "compressor.filters.jsmin.JSMinFilter",
219 ]
220 COMPRESS_PRECOMPILERS = (
221 ("module", "compressor_toolkit.precompilers.ES6Compiler"),
222 ("css", "compressor_toolkit.precompilers.SCSSCompiler"),
223 )
224 COMPRESS_ENABLED = False
225 COMPRESS_OFFLINE = True
226
227 # GVZ (Gemeindeverzeichnis) API URL
228 GVZ_API_URL = "http://gvz.integreat-app.de/api/"
229 GVZ_API_ENABLED = True
230
231 # Allow access to all domains by setting the following variable to TRUE
232 CORS_ORIGIN_ALLOW_ALL = True
233
234 # Extend default headers with development header to differenciate dev traffic in statistics
235 CORS_ALLOW_HEADERS = [
236 "accept",
237 "accept-encoding",
238 "authorization",
239 "content-type",
240 "dnt",
241 "origin",
242 "user-agent",
243 "x-csrftoken",
244 "x-requested-with",
245 "x-integreat-development",
246 ]
247
[end of src/backend/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/backend/settings.py b/src/backend/settings.py
--- a/src/backend/settings.py
+++ b/src/backend/settings.py
@@ -197,7 +197,9 @@
"loggers": {
"django": {"handlers": ["console"], "level": "WARN", "propagate": True,},
"api": {"handlers": ["console"], "level": "INFO", "propagate": True,},
+ "backend": {"handlers": ["console"], "level": "INFO", "propagate": True,},
"cms": {"handlers": ["console"], "level": "INFO", "propagate": True,},
+ "gvz_api": {"handlers": ["console"], "level": "INFO", "propagate": True,},
"rules": {"handlers": ["console"], "level": "DEBUG", "propagate": True,},
"auth": {"handlers": ["console", "authlog", "syslog"], "level": "INFO",},
},
@@ -225,7 +227,7 @@
COMPRESS_OFFLINE = True
# GVZ (Gemeindeverzeichnis) API URL
-GVZ_API_URL = "http://gvz.integreat-app.de/api/"
+GVZ_API_URL = "https://gvz.integreat-app.de/api/"
GVZ_API_ENABLED = True
# Allow access to all domains by setting the following variable to TRUE
diff --git a/src/gvz_api/apps.py b/src/gvz_api/apps.py
--- a/src/gvz_api/apps.py
+++ b/src/gvz_api/apps.py
@@ -1,6 +1,7 @@
"""
Configuration of GVZ API app
"""
+import sys
import logging
import json
import requests
@@ -22,25 +23,25 @@
"""
Checking if API is available
"""
- if settings.GVZ_API_ENABLED:
- try:
- response = requests.get(
- f"{settings.GVZ_API_URL}/search/expect_empty_json"
- )
- json.loads(response.text)
- except json.decoder.JSONDecodeError:
- self.api_available = False
- except requests.exceptions.RequestException:
- self.api_available = False
+ # Only check availability if current command is "runserver"
+ if sys.argv[1] == "runserver":
+ if settings.GVZ_API_ENABLED:
+ try:
+ response = requests.get(
+ f"{settings.GVZ_API_URL}/search/expect_empty_json", timeout=3
+ )
+ json.loads(response.text)
+ except (
+ json.decoder.JSONDecodeError,
+ requests.exceptions.RequestException,
+ requests.exceptions.Timeout,
+ ):
+ logger.info(
+ "GVZ API is not available. You won't be able to "
+ "automatically import coordinates and region aliases."
+ )
+ else:
+ self.api_available = True
+ logger.debug("GVZ API is available.")
else:
- self.api_available = True
- else:
- self.api_available = False
- if not self.api_available:
- logger.info(
- "GVZ API is not available. You won't be able to "
- "automatically import coordinates and region aliases."
- )
- else:
- self.api_available = True
- logger.info("GVZ API is available.")
+ logger.debug("GVZ API is not enabled.")
| {"golden_diff": "diff --git a/src/backend/settings.py b/src/backend/settings.py\n--- a/src/backend/settings.py\n+++ b/src/backend/settings.py\n@@ -197,7 +197,9 @@\n \"loggers\": {\n \"django\": {\"handlers\": [\"console\"], \"level\": \"WARN\", \"propagate\": True,},\n \"api\": {\"handlers\": [\"console\"], \"level\": \"INFO\", \"propagate\": True,},\n+ \"backend\": {\"handlers\": [\"console\"], \"level\": \"INFO\", \"propagate\": True,},\n \"cms\": {\"handlers\": [\"console\"], \"level\": \"INFO\", \"propagate\": True,},\n+ \"gvz_api\": {\"handlers\": [\"console\"], \"level\": \"INFO\", \"propagate\": True,},\n \"rules\": {\"handlers\": [\"console\"], \"level\": \"DEBUG\", \"propagate\": True,},\n \"auth\": {\"handlers\": [\"console\", \"authlog\", \"syslog\"], \"level\": \"INFO\",},\n },\n@@ -225,7 +227,7 @@\n COMPRESS_OFFLINE = True\n \n # GVZ (Gemeindeverzeichnis) API URL\n-GVZ_API_URL = \"http://gvz.integreat-app.de/api/\"\n+GVZ_API_URL = \"https://gvz.integreat-app.de/api/\"\n GVZ_API_ENABLED = True\n \n # Allow access to all domains by setting the following variable to TRUE\ndiff --git a/src/gvz_api/apps.py b/src/gvz_api/apps.py\n--- a/src/gvz_api/apps.py\n+++ b/src/gvz_api/apps.py\n@@ -1,6 +1,7 @@\n \"\"\"\n Configuration of GVZ API app\n \"\"\"\n+import sys\n import logging\n import json\n import requests\n@@ -22,25 +23,25 @@\n \"\"\"\n Checking if API is available\n \"\"\"\n- if settings.GVZ_API_ENABLED:\n- try:\n- response = requests.get(\n- f\"{settings.GVZ_API_URL}/search/expect_empty_json\"\n- )\n- json.loads(response.text)\n- except json.decoder.JSONDecodeError:\n- self.api_available = False\n- except requests.exceptions.RequestException:\n- self.api_available = False\n+ # Only check availability if current command is \"runserver\"\n+ if sys.argv[1] == \"runserver\":\n+ if settings.GVZ_API_ENABLED:\n+ try:\n+ response = requests.get(\n+ f\"{settings.GVZ_API_URL}/search/expect_empty_json\", timeout=3\n+ )\n+ json.loads(response.text)\n+ except (\n+ json.decoder.JSONDecodeError,\n+ requests.exceptions.RequestException,\n+ requests.exceptions.Timeout,\n+ ):\n+ logger.info(\n+ \"GVZ API is not available. You won't be able to \"\n+ \"automatically import coordinates and region aliases.\"\n+ )\n+ else:\n+ self.api_available = True\n+ logger.debug(\"GVZ API is available.\")\n else:\n- self.api_available = True\n- else:\n- self.api_available = False\n- if not self.api_available:\n- logger.info(\n- \"GVZ API is not available. You won't be able to \"\n- \"automatically import coordinates and region aliases.\"\n- )\n- else:\n- self.api_available = True\n- logger.info(\"GVZ API is available.\")\n+ logger.debug(\"GVZ API is not enabled.\")\n", "issue": "Development server not starting when GVZ API not reachable\n### Describe the Bug\r\n<!-- A clear and concise description of what the bug is. -->\r\nWhen the [GVZ API](http://gvz.integreat-app.de/api/search/expect_empty_json) is reachable, but does not answer, the development server hangs during start, because there is no timeout defined for the api request [here](https://github.com/Integreat/cms-django/blob/f4f4c698575e975655b1333f630b5f0d6e4e034f/src/gvz_api/apps.py#L27).\r\n\r\n### Steps to Reproduce\r\n\r\n1. Execute `./dev-tools/run.sh` when GVZ API is reachable but does not answer (tested this morning)\r\n2. Wait\r\n\r\n### Expected Behavior\r\n<!-- A clear and concise description of what you expected to happen. 
-->\r\nThe server should start and print a log message that the gvz api is not available\r\n\r\n### Actual Behavior\r\n<!-- A clear and concise description of what actually happened. -->\r\nThe server does not start\r\n\r\n### Additional Information\r\n<!-- Add any other context (e.g. logs, screenshots, etc.) about the problem here. -->\r\n\r\n\nDevelopment server not starting when GVZ API not reachable\n### Describe the Bug\r\n<!-- A clear and concise description of what the bug is. -->\r\nWhen the [GVZ API](http://gvz.integreat-app.de/api/search/expect_empty_json) is reachable, but does not answer, the development server hangs during start, because there is no timeout defined for the api request [here](https://github.com/Integreat/cms-django/blob/f4f4c698575e975655b1333f630b5f0d6e4e034f/src/gvz_api/apps.py#L27).\r\n\r\n### Steps to Reproduce\r\n\r\n1. Execute `./dev-tools/run.sh` when GVZ API is reachable but does not answer (tested this morning)\r\n2. Wait\r\n\r\n### Expected Behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nThe server should start and print a log message that the gvz api is not available\r\n\r\n### Actual Behavior\r\n<!-- A clear and concise description of what actually happened. -->\r\nThe server does not start\r\n\r\n### Additional Information\r\n<!-- Add any other context (e.g. logs, screenshots, etc.) about the problem here. -->\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nConfiguration of GVZ API app\n\"\"\"\nimport logging\nimport json\nimport requests\nfrom django.apps import AppConfig\nfrom django.conf import settings\n\nlogger = logging.getLogger(__name__)\n\n\nclass GvzApiConfig(AppConfig):\n \"\"\"\n GVZ API config inheriting the django AppConfig\n \"\"\"\n\n name = \"gvz_api\"\n api_available = False\n\n def ready(self):\n \"\"\"\n Checking if API is available\n \"\"\"\n if settings.GVZ_API_ENABLED:\n try:\n response = requests.get(\n f\"{settings.GVZ_API_URL}/search/expect_empty_json\"\n )\n json.loads(response.text)\n except json.decoder.JSONDecodeError:\n self.api_available = False\n except requests.exceptions.RequestException:\n self.api_available = False\n else:\n self.api_available = True\n else:\n self.api_available = False\n if not self.api_available:\n logger.info(\n \"GVZ API is not available. 
You won't be able to \"\n \"automatically import coordinates and region aliases.\"\n )\n else:\n self.api_available = True\n logger.info(\"GVZ API is available.\")\n", "path": "src/gvz_api/apps.py"}, {"content": "\"\"\"\nDjango settings for backend project.\n\nFor more information on this file, see :doc:`topics/settings`.\nFor the full list of settings and their values, see :doc:`ref/settings`.\n\"\"\"\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Version number\nVERSION = \"0.0.14\"\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = \"-!v282$zj815_q@htaxcubylo)(l%a+k*-xi78hw*#s2@i86@_\"\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = [\"localhost\", \"127.0.0.1\", \"0.0.0.0\"]\n\n# Needed for webauthn (this is a setting in case the application runs behind a proxy)\nHOSTNAME = \"localhost\"\nBASE_URL = \"http://localhost:8000\"\n\n# Application definition\n\nINSTALLED_APPS = [\n \"cms.apps.CmsConfig\",\n \"gvz_api.apps.GvzApiConfig\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.messages\",\n \"django.contrib.sessions\",\n \"django.contrib.staticfiles\",\n \"compressor\",\n \"compressor_toolkit\",\n \"corsheaders\",\n \"widget_tweaks\",\n \"easy_thumbnails\",\n \"filer\",\n \"mptt\",\n \"rules.apps.AutodiscoverRulesConfig\",\n]\n\nMIDDLEWARE = [\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"backend.urls\"\nTHUMBNAIL_HIGH_RESOLUTION = True\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"backend.context_processors.region_slug_processor\",\n ],\n \"debug\": DEBUG,\n },\n },\n]\n\nWSGI_APPLICATION = \"backend.wsgi.application\"\n\n\n# Database\n# https://docs.djangoproject.com/en/2.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"NAME\": \"integreat\",\n \"USER\": \"integreat\",\n \"PASSWORD\": \"password\",\n \"HOST\": \"localhost\",\n \"PORT\": \"5432\",\n }\n}\n\n# Directory for initial database contents\n\nFIXTURE_DIRS = (os.path.join(BASE_DIR, \"cms/fixtures/\"),)\n\n# Authentication backends\n\nAUTHENTICATION_BACKENDS = (\n \"rules.permissions.ObjectPermissionBackend\",\n \"django.contrib.auth.backends.ModelBackend\", # this is default\n)\n\n\n# Password validation\n# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": 
\"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",},\n {\"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",},\n {\"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",},\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.2/topics/i18n/\n\nLANGUAGES = (\n (\"en-us\", \"English\"),\n (\"de-de\", \"Deutsch\"),\n)\n\nLOCALE_PATHS = (os.path.join(BASE_DIR, \"locale\"),)\n\nLANGUAGE_CODE = \"de-de\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.2/howto/static-files/\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, \"../node_modules\"),\n]\nSTATIC_URL = \"/static/\"\nSTATIC_ROOT = os.path.join(BASE_DIR, \"cms/static/\")\n\n# Login\nLOGIN_URL = \"/login\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGOUT_REDIRECT_URL = \"/login\"\n\n# Miscellaneous\nEMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\nCSRF_FAILURE_VIEW = \"cms.views.error_handler.csrf_failure\"\n\nMEDIA_URL = \"/media/\"\nMEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\nFILER_CANONICAL_URL = \"media/\"\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"default\": {\"format\": \"INTEGREAT CMS - %(levelname)s: %(message)s\",},\n \"console\": {\n \"format\": \"%(asctime)s INTEGREAT CMS - %(levelname)s: %(message)s\",\n \"datefmt\": \"%b %d %H:%M:%S\",\n },\n },\n \"handlers\": {\n \"console\": {\"class\": \"logging.StreamHandler\", \"formatter\": \"console\"},\n \"authlog\": {\n \"level\": \"INFO\",\n \"class\": \"logging.handlers.SysLogHandler\",\n \"address\": \"/dev/log\",\n \"facility\": \"auth\",\n \"formatter\": \"default\",\n },\n \"syslog\": {\n \"level\": \"INFO\",\n \"class\": \"logging.handlers.SysLogHandler\",\n \"address\": \"/dev/log\",\n \"facility\": \"syslog\",\n \"formatter\": \"default\",\n },\n },\n \"loggers\": {\n \"django\": {\"handlers\": [\"console\"], \"level\": \"WARN\", \"propagate\": True,},\n \"api\": {\"handlers\": [\"console\"], \"level\": \"INFO\", \"propagate\": True,},\n \"cms\": {\"handlers\": [\"console\"], \"level\": \"INFO\", \"propagate\": True,},\n \"rules\": {\"handlers\": [\"console\"], \"level\": \"DEBUG\", \"propagate\": True,},\n \"auth\": {\"handlers\": [\"console\", \"authlog\", \"syslog\"], \"level\": \"INFO\",},\n },\n}\n\nSTATICFILES_FINDERS = (\n \"django.contrib.staticfiles.finders.FileSystemFinder\",\n \"django.contrib.staticfiles.finders.AppDirectoriesFinder\",\n \"compressor.finders.CompressorFinder\",\n)\n\nCOMPRESS_CSS_FILTERS = [\n \"compressor.filters.css_default.CssAbsoluteFilter\",\n \"compressor.filters.cssmin.CSSMinFilter\",\n \"compressor.filters.template.TemplateFilter\",\n]\nCOMPRESS_JS_FILTERS = [\n \"compressor.filters.jsmin.JSMinFilter\",\n]\nCOMPRESS_PRECOMPILERS = (\n (\"module\", \"compressor_toolkit.precompilers.ES6Compiler\"),\n (\"css\", \"compressor_toolkit.precompilers.SCSSCompiler\"),\n)\nCOMPRESS_ENABLED = False\nCOMPRESS_OFFLINE = True\n\n# GVZ (Gemeindeverzeichnis) API URL\nGVZ_API_URL = \"http://gvz.integreat-app.de/api/\"\nGVZ_API_ENABLED = True\n\n# Allow access to all domains by setting the following variable to TRUE\nCORS_ORIGIN_ALLOW_ALL = True\n\n# Extend default headers with development header to differenciate dev traffic in statistics\nCORS_ALLOW_HEADERS = [\n \"accept\",\n \"accept-encoding\",\n 
\"authorization\",\n \"content-type\",\n \"dnt\",\n \"origin\",\n \"user-agent\",\n \"x-csrftoken\",\n \"x-requested-with\",\n \"x-integreat-development\",\n]\n", "path": "src/backend/settings.py"}]} | 3,799 | 749 |
gh_patches_debug_12356 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-2514 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
UX and frontend implementation for upgrades
### Notes
- [Current Figma design](https://www.figma.com/file/xHb5oIqye3fnXtb2heRH34/Styling?node-id=3804%3A28864&t=HyNupYmgZ9PqjEGr-0)
- [Issue with user flow](https://github.com/centerofci/mathesar/issues/227)
### Tasks
- Finalize the user flow
- Request Figma UX changes if needed
- Implement the frontend (create additional GitHub issues if needed)
### Feasibility
Watchtower, our docker image upgrade backend, doesn't report progress. The only progress reporting available to the frontend will be periodically calling some HTTP endpoint on the service container to check whether it is online or not.
The final UX should take this into account.
</issue>
<code>
[start of mathesar/urls.py]
1 from django.contrib.auth.views import LoginView
2 from django.urls import include, path, re_path
3 from rest_framework_nested import routers
4
5 from mathesar import views
6 from mathesar.api.db import viewsets as db_viewsets
7 from mathesar.api.ui import viewsets as ui_viewsets
8 from mathesar.users.password_reset import MathesarPasswordResetConfirmView
9
10 db_router = routers.DefaultRouter()
11 db_router.register(r'tables', db_viewsets.TableViewSet, basename='table')
12 db_router.register(r'queries', db_viewsets.QueryViewSet, basename='query')
13 db_router.register(r'links', db_viewsets.LinkViewSet, basename='links')
14 db_router.register(r'schemas', db_viewsets.SchemaViewSet, basename='schema')
15 db_router.register(r'databases', db_viewsets.DatabaseViewSet, basename='database')
16 db_router.register(r'data_files', db_viewsets.DataFileViewSet, basename='data-file')
17
18 db_table_router = routers.NestedSimpleRouter(db_router, r'tables', lookup='table')
19 db_table_router.register(r'records', db_viewsets.RecordViewSet, basename='table-record')
20 db_table_router.register(r'settings', db_viewsets.TableSettingsViewSet, basename='table-setting')
21 db_table_router.register(r'columns', db_viewsets.ColumnViewSet, basename='table-column')
22 db_table_router.register(r'constraints', db_viewsets.ConstraintViewSet, basename='table-constraint')
23
24 ui_router = routers.DefaultRouter()
25 ui_router.register(r'version', ui_viewsets.VersionViewSet, basename='version')
26 ui_router.register(r'databases', ui_viewsets.DatabaseViewSet, basename='database')
27 ui_router.register(r'users', ui_viewsets.UserViewSet, basename='user')
28 ui_router.register(r'database_roles', ui_viewsets.DatabaseRoleViewSet, basename='database_role')
29 ui_router.register(r'schema_roles', ui_viewsets.SchemaRoleViewSet, basename='schema_role')
30
31 urlpatterns = [
32 path('api/db/v0/', include(db_router.urls)),
33 path('api/db/v0/', include(db_table_router.urls)),
34 path('api/ui/v0/', include(ui_router.urls)),
35 path('api/ui/v0/reflect/', views.reflect_all, name='reflect_all'),
36 path('auth/password_reset_confirm', MathesarPasswordResetConfirmView.as_view(), name='password_reset_confirm'),
37 path('auth/login/', LoginView.as_view(redirect_authenticated_user=True), name='login'),
38 path('auth/', include('django.contrib.auth.urls')),
39 path('', views.home, name='home'),
40 path('profile/', views.profile, name='profile'),
41 path('administration/', views.admin_home, name='admin_home'),
42 path('administration/users/', views.admin_home, name='admin_users_home'),
43 path('administration/users/<user_id>/', views.admin_home, name='admin_users_edit'),
44 path('administration/general/', views.admin_home, name='admin_general'),
45 path('<db_name>/', views.schemas, name='schemas'),
46 re_path(
47 r'^(?P<db_name>\w+)/(?P<schema_id>\w+)/',
48 views.schema_home,
49 name='schema_home'
50 ),
51 ]
52
[end of mathesar/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mathesar/urls.py b/mathesar/urls.py
--- a/mathesar/urls.py
+++ b/mathesar/urls.py
@@ -41,7 +41,7 @@
path('administration/', views.admin_home, name='admin_home'),
path('administration/users/', views.admin_home, name='admin_users_home'),
path('administration/users/<user_id>/', views.admin_home, name='admin_users_edit'),
- path('administration/general/', views.admin_home, name='admin_general'),
+ path('administration/update/', views.admin_home, name='admin_update'),
path('<db_name>/', views.schemas, name='schemas'),
re_path(
r'^(?P<db_name>\w+)/(?P<schema_id>\w+)/',
| {"golden_diff": "diff --git a/mathesar/urls.py b/mathesar/urls.py\n--- a/mathesar/urls.py\n+++ b/mathesar/urls.py\n@@ -41,7 +41,7 @@\n path('administration/', views.admin_home, name='admin_home'),\n path('administration/users/', views.admin_home, name='admin_users_home'),\n path('administration/users/<user_id>/', views.admin_home, name='admin_users_edit'),\n- path('administration/general/', views.admin_home, name='admin_general'),\n+ path('administration/update/', views.admin_home, name='admin_update'),\n path('<db_name>/', views.schemas, name='schemas'),\n re_path(\n r'^(?P<db_name>\\w+)/(?P<schema_id>\\w+)/',\n", "issue": "UX and frontend implementation for upgrades\n### Notes\r\n- [Current Figma design](https://www.figma.com/file/xHb5oIqye3fnXtb2heRH34/Styling?node-id=3804%3A28864&t=HyNupYmgZ9PqjEGr-0)\r\n- [Issue with user flow](https://github.com/centerofci/mathesar/issues/227)\r\n\r\n### Tasks\r\n- Finalize the user flow\r\n- Request Figma UX changes if needed\r\n- Implement the frontend (create additional GitHub issues if needed)\r\n\r\n### Feasibility \r\nWatchtower, our docker image upgrade backend, doesn't report progress. The only progress reporting available to the frontend will be periodically calling some HTTP endpoint on the service container to check whether it is online or not.\r\n\r\nThe final UX should take this into account.\n", "before_files": [{"content": "from django.contrib.auth.views import LoginView\nfrom django.urls import include, path, re_path\nfrom rest_framework_nested import routers\n\nfrom mathesar import views\nfrom mathesar.api.db import viewsets as db_viewsets\nfrom mathesar.api.ui import viewsets as ui_viewsets\nfrom mathesar.users.password_reset import MathesarPasswordResetConfirmView\n\ndb_router = routers.DefaultRouter()\ndb_router.register(r'tables', db_viewsets.TableViewSet, basename='table')\ndb_router.register(r'queries', db_viewsets.QueryViewSet, basename='query')\ndb_router.register(r'links', db_viewsets.LinkViewSet, basename='links')\ndb_router.register(r'schemas', db_viewsets.SchemaViewSet, basename='schema')\ndb_router.register(r'databases', db_viewsets.DatabaseViewSet, basename='database')\ndb_router.register(r'data_files', db_viewsets.DataFileViewSet, basename='data-file')\n\ndb_table_router = routers.NestedSimpleRouter(db_router, r'tables', lookup='table')\ndb_table_router.register(r'records', db_viewsets.RecordViewSet, basename='table-record')\ndb_table_router.register(r'settings', db_viewsets.TableSettingsViewSet, basename='table-setting')\ndb_table_router.register(r'columns', db_viewsets.ColumnViewSet, basename='table-column')\ndb_table_router.register(r'constraints', db_viewsets.ConstraintViewSet, basename='table-constraint')\n\nui_router = routers.DefaultRouter()\nui_router.register(r'version', ui_viewsets.VersionViewSet, basename='version')\nui_router.register(r'databases', ui_viewsets.DatabaseViewSet, basename='database')\nui_router.register(r'users', ui_viewsets.UserViewSet, basename='user')\nui_router.register(r'database_roles', ui_viewsets.DatabaseRoleViewSet, basename='database_role')\nui_router.register(r'schema_roles', ui_viewsets.SchemaRoleViewSet, basename='schema_role')\n\nurlpatterns = [\n path('api/db/v0/', include(db_router.urls)),\n path('api/db/v0/', include(db_table_router.urls)),\n path('api/ui/v0/', include(ui_router.urls)),\n path('api/ui/v0/reflect/', views.reflect_all, name='reflect_all'),\n path('auth/password_reset_confirm', MathesarPasswordResetConfirmView.as_view(), name='password_reset_confirm'),\n 
path('auth/login/', LoginView.as_view(redirect_authenticated_user=True), name='login'),\n path('auth/', include('django.contrib.auth.urls')),\n path('', views.home, name='home'),\n path('profile/', views.profile, name='profile'),\n path('administration/', views.admin_home, name='admin_home'),\n path('administration/users/', views.admin_home, name='admin_users_home'),\n path('administration/users/<user_id>/', views.admin_home, name='admin_users_edit'),\n path('administration/general/', views.admin_home, name='admin_general'),\n path('<db_name>/', views.schemas, name='schemas'),\n re_path(\n r'^(?P<db_name>\\w+)/(?P<schema_id>\\w+)/',\n views.schema_home,\n name='schema_home'\n ),\n]\n", "path": "mathesar/urls.py"}]} | 1,462 | 165 |
gh_patches_debug_19852 | rasdani/github-patches | git_diff | open-mmlab__mmcv-256 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: Deadlock 'fix' introduced deadlock.
Since https://github.com/open-mmlab/mmcv/pull/252 is merged my mmdetection code hangs after evaluation. After reverting the specific commit `git revert c203419f57c2e25ab4307420b9a3688f99e01dea`, my code runs again as expected..
</issue>
<code>
[start of mmcv/runner/hooks/logger/text.py]
1 # Copyright (c) Open-MMLab. All rights reserved.
2 import datetime
3 import os.path as osp
4 from collections import OrderedDict
5
6 import torch
7 import torch.distributed as dist
8
9 import mmcv
10 from ..hook import HOOKS
11 from .base import LoggerHook
12
13
14 @HOOKS.register_module
15 class TextLoggerHook(LoggerHook):
16
17 def __init__(self, interval=10, ignore_last=True, reset_flag=False):
18 super(TextLoggerHook, self).__init__(interval, ignore_last, reset_flag)
19 self.time_sec_tot = 0
20
21 def before_run(self, runner):
22 super(TextLoggerHook, self).before_run(runner)
23 self.start_iter = runner.iter
24 self.json_log_path = osp.join(runner.work_dir,
25 f'{runner.timestamp}.log.json')
26 if runner.meta is not None:
27 self._dump_log(runner.meta, runner)
28
29 def _get_max_memory(self, runner):
30 mem = torch.cuda.max_memory_allocated()
31 mem_mb = torch.tensor([mem / (1024 * 1024)],
32 dtype=torch.int,
33 device=torch.device('cuda'))
34 if runner.world_size > 1:
35 dist.reduce(mem_mb, 0, op=dist.ReduceOp.MAX)
36 return mem_mb.item()
37
38 def _log_info(self, log_dict, runner):
39 if runner.mode == 'train':
40 log_str = f'Epoch [{log_dict["epoch"]}]' \
41 f'[{log_dict["iter"]}/{len(runner.data_loader)}]\t' \
42 f'lr: {log_dict["lr"]:.5f}, '
43 if 'time' in log_dict.keys():
44 self.time_sec_tot += (log_dict['time'] * self.interval)
45 time_sec_avg = self.time_sec_tot / (
46 runner.iter - self.start_iter + 1)
47 eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1)
48 eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
49 log_str += f'eta: {eta_str}, '
50 log_str += f'time: {log_dict["time"]:.3f}, ' \
51 f'data_time: {log_dict["data_time"]:.3f}, '
52 # statistic memory
53 if torch.cuda.is_available():
54 log_str += f'memory: {log_dict["memory"]}, '
55 else:
56 log_str = 'Epoch({log_dict["mode"]}) ' \
57 f'[{log_dict["epoch"] - 1}][{log_dict["iter"]}]\t'
58 log_items = []
59 for name, val in log_dict.items():
60 # TODO: resolve this hack
61 # these items have been in log_str
62 if name in [
63 'mode', 'Epoch', 'iter', 'lr', 'time', 'data_time',
64 'memory', 'epoch'
65 ]:
66 continue
67 if isinstance(val, float):
68 val = f'{val:.4f}'
69 log_items.append(f'{name}: {val}')
70 log_str += ', '.join(log_items)
71 runner.logger.info(log_str)
72
73 def _dump_log(self, log_dict, runner):
74 # dump log in json format
75 json_log = OrderedDict()
76 for k, v in log_dict.items():
77 json_log[k] = self._round_float(v)
78 # only append log at last line
79 if runner.rank == 0:
80 with open(self.json_log_path, 'a+') as f:
81 mmcv.dump(json_log, f, file_format='json')
82 f.write('\n')
83
84 def _round_float(self, items):
85 if isinstance(items, list):
86 return [self._round_float(item) for item in items]
87 elif isinstance(items, float):
88 return round(items, 5)
89 else:
90 return items
91
92 def log(self, runner):
93 log_dict = OrderedDict()
94 # training mode if the output contains the key "time"
95 mode = 'train' if 'time' in runner.log_buffer.output else 'val'
96 log_dict['mode'] = mode
97 log_dict['epoch'] = runner.epoch + 1
98 log_dict['iter'] = runner.inner_iter + 1
99 # only record lr of the first param group
100 log_dict['lr'] = runner.current_lr()[0]
101 memory = None
102 if torch.cuda.is_available():
103 memory = self._get_max_memory(runner)
104 if mode == 'train':
105 log_dict['time'] = runner.log_buffer.output['time']
106 log_dict['data_time'] = runner.log_buffer.output['data_time']
107
108 # statistic memory
109 if memory is not None:
110 log_dict['memory'] = memory
111 for name, val in runner.log_buffer.output.items():
112 if name in ['time', 'data_time']:
113 continue
114 log_dict[name] = val
115
116 self._log_info(log_dict, runner)
117 self._dump_log(log_dict, runner)
118
[end of mmcv/runner/hooks/logger/text.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mmcv/runner/hooks/logger/text.py b/mmcv/runner/hooks/logger/text.py
--- a/mmcv/runner/hooks/logger/text.py
+++ b/mmcv/runner/hooks/logger/text.py
@@ -98,16 +98,13 @@
log_dict['iter'] = runner.inner_iter + 1
# only record lr of the first param group
log_dict['lr'] = runner.current_lr()[0]
- memory = None
- if torch.cuda.is_available():
- memory = self._get_max_memory(runner)
if mode == 'train':
log_dict['time'] = runner.log_buffer.output['time']
log_dict['data_time'] = runner.log_buffer.output['data_time']
# statistic memory
- if memory is not None:
- log_dict['memory'] = memory
+ if torch.cuda.is_available():
+ log_dict['memory'] = self._get_max_memory(runner)
for name, val in runner.log_buffer.output.items():
if name in ['time', 'data_time']:
continue
| {"golden_diff": "diff --git a/mmcv/runner/hooks/logger/text.py b/mmcv/runner/hooks/logger/text.py\n--- a/mmcv/runner/hooks/logger/text.py\n+++ b/mmcv/runner/hooks/logger/text.py\n@@ -98,16 +98,13 @@\n log_dict['iter'] = runner.inner_iter + 1\n # only record lr of the first param group\n log_dict['lr'] = runner.current_lr()[0]\n- memory = None\n- if torch.cuda.is_available():\n- memory = self._get_max_memory(runner)\n if mode == 'train':\n log_dict['time'] = runner.log_buffer.output['time']\n log_dict['data_time'] = runner.log_buffer.output['data_time']\n \n # statistic memory\n- if memory is not None:\n- log_dict['memory'] = memory\n+ if torch.cuda.is_available():\n+ log_dict['memory'] = self._get_max_memory(runner)\n for name, val in runner.log_buffer.output.items():\n if name in ['time', 'data_time']:\n continue\n", "issue": "Bug: Deadlock 'fix' introduced deadlock.\nSince https://github.com/open-mmlab/mmcv/pull/252 is merged my mmdetection code hangs after evaluation. After reverting the specific commit `git revert c203419f57c2e25ab4307420b9a3688f99e01dea`, my code runs again as expected..\n", "before_files": [{"content": "# Copyright (c) Open-MMLab. All rights reserved.\nimport datetime\nimport os.path as osp\nfrom collections import OrderedDict\n\nimport torch\nimport torch.distributed as dist\n\nimport mmcv\nfrom ..hook import HOOKS\nfrom .base import LoggerHook\n\n\[email protected]_module\nclass TextLoggerHook(LoggerHook):\n\n def __init__(self, interval=10, ignore_last=True, reset_flag=False):\n super(TextLoggerHook, self).__init__(interval, ignore_last, reset_flag)\n self.time_sec_tot = 0\n\n def before_run(self, runner):\n super(TextLoggerHook, self).before_run(runner)\n self.start_iter = runner.iter\n self.json_log_path = osp.join(runner.work_dir,\n f'{runner.timestamp}.log.json')\n if runner.meta is not None:\n self._dump_log(runner.meta, runner)\n\n def _get_max_memory(self, runner):\n mem = torch.cuda.max_memory_allocated()\n mem_mb = torch.tensor([mem / (1024 * 1024)],\n dtype=torch.int,\n device=torch.device('cuda'))\n if runner.world_size > 1:\n dist.reduce(mem_mb, 0, op=dist.ReduceOp.MAX)\n return mem_mb.item()\n\n def _log_info(self, log_dict, runner):\n if runner.mode == 'train':\n log_str = f'Epoch [{log_dict[\"epoch\"]}]' \\\n f'[{log_dict[\"iter\"]}/{len(runner.data_loader)}]\\t' \\\n f'lr: {log_dict[\"lr\"]:.5f}, '\n if 'time' in log_dict.keys():\n self.time_sec_tot += (log_dict['time'] * self.interval)\n time_sec_avg = self.time_sec_tot / (\n runner.iter - self.start_iter + 1)\n eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1)\n eta_str = str(datetime.timedelta(seconds=int(eta_sec)))\n log_str += f'eta: {eta_str}, '\n log_str += f'time: {log_dict[\"time\"]:.3f}, ' \\\n f'data_time: {log_dict[\"data_time\"]:.3f}, '\n # statistic memory\n if torch.cuda.is_available():\n log_str += f'memory: {log_dict[\"memory\"]}, '\n else:\n log_str = 'Epoch({log_dict[\"mode\"]}) ' \\\n f'[{log_dict[\"epoch\"] - 1}][{log_dict[\"iter\"]}]\\t'\n log_items = []\n for name, val in log_dict.items():\n # TODO: resolve this hack\n # these items have been in log_str\n if name in [\n 'mode', 'Epoch', 'iter', 'lr', 'time', 'data_time',\n 'memory', 'epoch'\n ]:\n continue\n if isinstance(val, float):\n val = f'{val:.4f}'\n log_items.append(f'{name}: {val}')\n log_str += ', '.join(log_items)\n runner.logger.info(log_str)\n\n def _dump_log(self, log_dict, runner):\n # dump log in json format\n json_log = OrderedDict()\n for k, v in log_dict.items():\n json_log[k] = 
self._round_float(v)\n # only append log at last line\n if runner.rank == 0:\n with open(self.json_log_path, 'a+') as f:\n mmcv.dump(json_log, f, file_format='json')\n f.write('\\n')\n\n def _round_float(self, items):\n if isinstance(items, list):\n return [self._round_float(item) for item in items]\n elif isinstance(items, float):\n return round(items, 5)\n else:\n return items\n\n def log(self, runner):\n log_dict = OrderedDict()\n # training mode if the output contains the key \"time\"\n mode = 'train' if 'time' in runner.log_buffer.output else 'val'\n log_dict['mode'] = mode\n log_dict['epoch'] = runner.epoch + 1\n log_dict['iter'] = runner.inner_iter + 1\n # only record lr of the first param group\n log_dict['lr'] = runner.current_lr()[0]\n memory = None\n if torch.cuda.is_available():\n memory = self._get_max_memory(runner)\n if mode == 'train':\n log_dict['time'] = runner.log_buffer.output['time']\n log_dict['data_time'] = runner.log_buffer.output['data_time']\n\n # statistic memory\n if memory is not None:\n log_dict['memory'] = memory\n for name, val in runner.log_buffer.output.items():\n if name in ['time', 'data_time']:\n continue\n log_dict[name] = val\n\n self._log_info(log_dict, runner)\n self._dump_log(log_dict, runner)\n", "path": "mmcv/runner/hooks/logger/text.py"}]} | 1,943 | 237 |
gh_patches_debug_15503 | rasdani/github-patches | git_diff | ytdl-org__youtube-dl-16157 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ERROR: Unable to extract video data
## Please follow the guide below
- You will be asked some questions and requested to provide some information, please read them **carefully** and answer honestly
- Put an `x` into all the boxes [ ] relevant to your *issue* (like this: `[x]`)
- Use the *Preview* tab to see what your issue will actually look like
---
### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.04.03*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
- [X] I've **verified** and **I assure** that I'm running youtube-dl **2018.04.03**
### Before submitting an *issue* make sure you have:
- [X] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
- [X] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones
- [X] Checked that provided video/audio/playlist URLs (if any) are alive and playable in a browser
### What is the purpose of your *issue*?
- [X] Bug report (encountered problems with youtube-dl)
- [ ] Site support request (request for adding support for a new site)
- [ ] Feature request (request for a new functionality)
- [ ] Question
- [ ] Other
---
### The following sections concretize particular purposed issues, you can erase any section (the contents between triple ---) not applicable to your *issue*
---
### If the purpose of this *issue* is a *bug report*, *site support request* or you are not completely sure provide the full verbose output as follows:
Add the `-v` flag to **your command line** you run youtube-dl with (`youtube-dl -v <your command line>`), copy the **whole** output and insert it here. It should look similar to one below (replace it with **your** log inserted between triple ```):
```
[debug] System config: []
[debug] User config: []
[debug] Custom config: []
[debug] Command-line args: [u'-v', u'http://www.fxnetworks.com/video/1199474243732']
[debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2018.04.03
[debug] Python version 2.7.13 (CPython) - Linux-4.4.114-42-default-x86_64-with-SuSE-42.3-x86_64
[debug] exe versions: ffmpeg 3.4.1, ffprobe 3.4.1, rtmpdump 2.4
[debug] Proxy map: {}
[FXNetworks] 1199474243732: Downloading webpage
ERROR: Unable to extract video data; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
Traceback (most recent call last):
File "/home/user/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 785, in extract_info
ie_result = ie.extract(url)
File "/home/user/bin/youtube-dl/youtube_dl/extractor/common.py", line 440, in extract
ie_result = self._real_extract(url)
File "/home/user/bin/youtube-dl/youtube_dl/extractor/fxnetworks.py", line 44, in _real_extract
r'(<a.+?rel="http://link\.theplatform\.com/s/.+?</a>)', webpage, 'video data'))
File "/home/user/bin/youtube-dl/youtube_dl/extractor/common.py", line 808, in _search_regex
raise RegexNotFoundError('Unable to extract %s' % _name)
RegexNotFoundError: Unable to extract video data; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
```
---
### If the purpose of this *issue* is a *site support request* please provide all kinds of example URLs support for which should be included (replace following example URLs by **yours**):
- Single video: https://www.youtube.com/watch?v=BaW_jenozKc
- Single video: https://youtu.be/BaW_jenozKc
- Playlist: https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc
Note that **youtube-dl does not support sites dedicated to [copyright infringement](https://github.com/rg3/youtube-dl#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free)**. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
---
### Description of your *issue*, suggested solution and other information
Explanation of your *issue* in arbitrary form goes here. Please make sure the [description is worded well enough to be understood](https://github.com/rg3/youtube-dl#is-the-description-of-the-issue-itself-sufficient). Provide as much context and examples as possible.
If work on your *issue* requires account credentials please provide them or explain how one can obtain them.
</issue>
<code>
[start of youtube_dl/extractor/fxnetworks.py]
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 from .adobepass import AdobePassIE
5 from ..utils import (
6 extract_attributes,
7 int_or_none,
8 parse_age_limit,
9 smuggle_url,
10 update_url_query,
11 )
12
13
14 class FXNetworksIE(AdobePassIE):
15 _VALID_URL = r'https?://(?:www\.)?(?:fxnetworks|simpsonsworld)\.com/video/(?P<id>\d+)'
16 _TESTS = [{
17 'url': 'http://www.fxnetworks.com/video/1032565827847',
18 'md5': '8d99b97b4aa7a202f55b6ed47ea7e703',
19 'info_dict': {
20 'id': 'dRzwHC_MMqIv',
21 'ext': 'mp4',
22 'title': 'First Look: Better Things - Season 2',
23 'description': 'Because real life is like a fart. Watch this FIRST LOOK to see what inspired the new season of Better Things.',
24 'age_limit': 14,
25 'uploader': 'NEWA-FNG-FX',
26 'upload_date': '20170825',
27 'timestamp': 1503686274,
28 'episode_number': 0,
29 'season_number': 2,
30 'series': 'Better Things',
31 },
32 'add_ie': ['ThePlatform'],
33 }, {
34 'url': 'http://www.simpsonsworld.com/video/716094019682',
35 'only_matching': True,
36 }]
37
38 def _real_extract(self, url):
39 video_id = self._match_id(url)
40 webpage = self._download_webpage(url, video_id)
41 if 'The content you are trying to access is not available in your region.' in webpage:
42 self.raise_geo_restricted()
43 video_data = extract_attributes(self._search_regex(
44 r'(<a.+?rel="http://link\.theplatform\.com/s/.+?</a>)', webpage, 'video data'))
45 player_type = self._search_regex(r'playerType\s*=\s*[\'"]([^\'"]+)', webpage, 'player type', default=None)
46 release_url = video_data['rel']
47 title = video_data['data-title']
48 rating = video_data.get('data-rating')
49 query = {
50 'mbr': 'true',
51 }
52 if player_type == 'movies':
53 query.update({
54 'manifest': 'm3u',
55 })
56 else:
57 query.update({
58 'switch': 'http',
59 })
60 if video_data.get('data-req-auth') == '1':
61 resource = self._get_mvpd_resource(
62 video_data['data-channel'], title,
63 video_data.get('data-guid'), rating)
64 query['auth'] = self._extract_mvpd_auth(url, video_id, 'fx', resource)
65
66 return {
67 '_type': 'url_transparent',
68 'id': video_id,
69 'title': title,
70 'url': smuggle_url(update_url_query(release_url, query), {'force_smil_url': True}),
71 'series': video_data.get('data-show-title'),
72 'episode_number': int_or_none(video_data.get('data-episode')),
73 'season_number': int_or_none(video_data.get('data-season')),
74 'thumbnail': video_data.get('data-large-thumb'),
75 'age_limit': parse_age_limit(rating),
76 'ie_key': 'ThePlatform',
77 }
78
[end of youtube_dl/extractor/fxnetworks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/youtube_dl/extractor/fxnetworks.py b/youtube_dl/extractor/fxnetworks.py
--- a/youtube_dl/extractor/fxnetworks.py
+++ b/youtube_dl/extractor/fxnetworks.py
@@ -41,7 +41,7 @@
if 'The content you are trying to access is not available in your region.' in webpage:
self.raise_geo_restricted()
video_data = extract_attributes(self._search_regex(
- r'(<a.+?rel="http://link\.theplatform\.com/s/.+?</a>)', webpage, 'video data'))
+ r'(<a.+?rel="https?://link\.theplatform\.com/s/.+?</a>)', webpage, 'video data'))
player_type = self._search_regex(r'playerType\s*=\s*[\'"]([^\'"]+)', webpage, 'player type', default=None)
release_url = video_data['rel']
title = video_data['data-title']
| {"golden_diff": "diff --git a/youtube_dl/extractor/fxnetworks.py b/youtube_dl/extractor/fxnetworks.py\n--- a/youtube_dl/extractor/fxnetworks.py\n+++ b/youtube_dl/extractor/fxnetworks.py\n@@ -41,7 +41,7 @@\n if 'The content you are trying to access is not available in your region.' in webpage:\n self.raise_geo_restricted()\n video_data = extract_attributes(self._search_regex(\n- r'(<a.+?rel=\"http://link\\.theplatform\\.com/s/.+?</a>)', webpage, 'video data'))\n+ r'(<a.+?rel=\"https?://link\\.theplatform\\.com/s/.+?</a>)', webpage, 'video data'))\n player_type = self._search_regex(r'playerType\\s*=\\s*[\\'\"]([^\\'\"]+)', webpage, 'player type', default=None)\n release_url = video_data['rel']\n title = video_data['data-title']\n", "issue": "ERROR: Unable to extract video data\n## Please follow the guide below\r\n\r\n- You will be asked some questions and requested to provide some information, please read them **carefully** and answer honestly\r\n- Put an `x` into all the boxes [ ] relevant to your *issue* (like this: `[x]`)\r\n- Use the *Preview* tab to see what your issue will actually look like\r\n\r\n---\r\n\r\n### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.04.03*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.\r\n- [X] I've **verified** and **I assure** that I'm running youtube-dl **2018.04.03**\r\n\r\n### Before submitting an *issue* make sure you have:\r\n- [X] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections\r\n- [X] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones\r\n- [X] Checked that provided video/audio/playlist URLs (if any) are alive and playable in a browser\r\n\r\n### What is the purpose of your *issue*?\r\n- [X] Bug report (encountered problems with youtube-dl)\r\n- [ ] Site support request (request for adding support for a new site)\r\n- [ ] Feature request (request for a new functionality)\r\n- [ ] Question\r\n- [ ] Other\r\n\r\n---\r\n\r\n### The following sections concretize particular purposed issues, you can erase any section (the contents between triple ---) not applicable to your *issue*\r\n\r\n---\r\n\r\n### If the purpose of this *issue* is a *bug report*, *site support request* or you are not completely sure provide the full verbose output as follows:\r\n\r\nAdd the `-v` flag to **your command line** you run youtube-dl with (`youtube-dl -v <your command line>`), copy the **whole** output and insert it here. It should look similar to one below (replace it with **your** log inserted between triple ```):\r\n\r\n```\r\n[debug] System config: []\r\n[debug] User config: []\r\n[debug] Custom config: []\r\n[debug] Command-line args: [u'-v', u'http://www.fxnetworks.com/video/1199474243732']\r\n[debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8\r\n[debug] youtube-dl version 2018.04.03\r\n[debug] Python version 2.7.13 (CPython) - Linux-4.4.114-42-default-x86_64-with-SuSE-42.3-x86_64\r\n[debug] exe versions: ffmpeg 3.4.1, ffprobe 3.4.1, rtmpdump 2.4\r\n[debug] Proxy map: {}\r\n[FXNetworks] 1199474243732: Downloading webpage\r\nERROR: Unable to extract video data; please report this issue on https://yt-dl.org/bug . 
Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.\r\nTraceback (most recent call last):\r\n File \"/home/user/bin/youtube-dl/youtube_dl/YoutubeDL.py\", line 785, in extract_info\r\n ie_result = ie.extract(url)\r\n File \"/home/user/bin/youtube-dl/youtube_dl/extractor/common.py\", line 440, in extract\r\n ie_result = self._real_extract(url)\r\n File \"/home/user/bin/youtube-dl/youtube_dl/extractor/fxnetworks.py\", line 44, in _real_extract\r\n r'(<a.+?rel=\"http://link\\.theplatform\\.com/s/.+?</a>)', webpage, 'video data'))\r\n File \"/home/user/bin/youtube-dl/youtube_dl/extractor/common.py\", line 808, in _search_regex\r\n raise RegexNotFoundError('Unable to extract %s' % _name)\r\nRegexNotFoundError: Unable to extract video data; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.\r\n```\r\n\r\n---\r\n\r\n### If the purpose of this *issue* is a *site support request* please provide all kinds of example URLs support for which should be included (replace following example URLs by **yours**):\r\n- Single video: https://www.youtube.com/watch?v=BaW_jenozKc\r\n- Single video: https://youtu.be/BaW_jenozKc\r\n- Playlist: https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc\r\n\r\nNote that **youtube-dl does not support sites dedicated to [copyright infringement](https://github.com/rg3/youtube-dl#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free)**. In order for site support request to be accepted all provided example URLs should not violate any copyrights.\r\n\r\n---\r\n\r\n### Description of your *issue*, suggested solution and other information\r\n\r\nExplanation of your *issue* in arbitrary form goes here. Please make sure the [description is worded well enough to be understood](https://github.com/rg3/youtube-dl#is-the-description-of-the-issue-itself-sufficient). Provide as much context and examples as possible.\r\nIf work on your *issue* requires account credentials please provide them or explain how one can obtain them.\r\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nfrom .adobepass import AdobePassIE\nfrom ..utils import (\n extract_attributes,\n int_or_none,\n parse_age_limit,\n smuggle_url,\n update_url_query,\n)\n\n\nclass FXNetworksIE(AdobePassIE):\n _VALID_URL = r'https?://(?:www\\.)?(?:fxnetworks|simpsonsworld)\\.com/video/(?P<id>\\d+)'\n _TESTS = [{\n 'url': 'http://www.fxnetworks.com/video/1032565827847',\n 'md5': '8d99b97b4aa7a202f55b6ed47ea7e703',\n 'info_dict': {\n 'id': 'dRzwHC_MMqIv',\n 'ext': 'mp4',\n 'title': 'First Look: Better Things - Season 2',\n 'description': 'Because real life is like a fart. Watch this FIRST LOOK to see what inspired the new season of Better Things.',\n 'age_limit': 14,\n 'uploader': 'NEWA-FNG-FX',\n 'upload_date': '20170825',\n 'timestamp': 1503686274,\n 'episode_number': 0,\n 'season_number': 2,\n 'series': 'Better Things',\n },\n 'add_ie': ['ThePlatform'],\n }, {\n 'url': 'http://www.simpsonsworld.com/video/716094019682',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n webpage = self._download_webpage(url, video_id)\n if 'The content you are trying to access is not available in your region.' 
in webpage:\n self.raise_geo_restricted()\n video_data = extract_attributes(self._search_regex(\n r'(<a.+?rel=\"http://link\\.theplatform\\.com/s/.+?</a>)', webpage, 'video data'))\n player_type = self._search_regex(r'playerType\\s*=\\s*[\\'\"]([^\\'\"]+)', webpage, 'player type', default=None)\n release_url = video_data['rel']\n title = video_data['data-title']\n rating = video_data.get('data-rating')\n query = {\n 'mbr': 'true',\n }\n if player_type == 'movies':\n query.update({\n 'manifest': 'm3u',\n })\n else:\n query.update({\n 'switch': 'http',\n })\n if video_data.get('data-req-auth') == '1':\n resource = self._get_mvpd_resource(\n video_data['data-channel'], title,\n video_data.get('data-guid'), rating)\n query['auth'] = self._extract_mvpd_auth(url, video_id, 'fx', resource)\n\n return {\n '_type': 'url_transparent',\n 'id': video_id,\n 'title': title,\n 'url': smuggle_url(update_url_query(release_url, query), {'force_smil_url': True}),\n 'series': video_data.get('data-show-title'),\n 'episode_number': int_or_none(video_data.get('data-episode')),\n 'season_number': int_or_none(video_data.get('data-season')),\n 'thumbnail': video_data.get('data-large-thumb'),\n 'age_limit': parse_age_limit(rating),\n 'ie_key': 'ThePlatform',\n }\n", "path": "youtube_dl/extractor/fxnetworks.py"}]} | 2,832 | 220 |
gh_patches_debug_4706 | rasdani/github-patches | git_diff | sherlock-project__sherlock-2109 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SHERLOCK// ERROR "string indices must be integers, not 'str' "
como puedo solucionar este error que me sale al querer usar sherlock, no se como solucionarlo la verdad
</issue>
<code>
[start of sherlock/sites.py]
1 """Sherlock Sites Information Module
2
3 This module supports storing information about websites.
4 This is the raw data that will be used to search for usernames.
5 """
6 import json
7 import requests
8 import secrets
9
10 class SiteInformation:
11 def __init__(self, name, url_home, url_username_format, username_claimed,
12 information, is_nsfw, username_unclaimed=secrets.token_urlsafe(10)):
13 """Create Site Information Object.
14
15 Contains information about a specific website.
16
17 Keyword Arguments:
18 self -- This object.
19 name -- String which identifies site.
20 url_home -- String containing URL for home of site.
21 url_username_format -- String containing URL for Username format
22 on site.
23 NOTE: The string should contain the
24 token "{}" where the username should
25 be substituted. For example, a string
26 of "https://somesite.com/users/{}"
27 indicates that the individual
28 usernames would show up under the
29 "https://somesite.com/users/" area of
30 the website.
31 username_claimed -- String containing username which is known
32 to be claimed on website.
33 username_unclaimed -- String containing username which is known
34 to be unclaimed on website.
35 information -- Dictionary containing all known information
36 about website.
37 NOTE: Custom information about how to
38 actually detect the existence of the
39 username will be included in this
40 dictionary. This information will
41 be needed by the detection method,
42 but it is only recorded in this
43 object for future use.
44 is_nsfw -- Boolean indicating if site is Not Safe For Work.
45
46 Return Value:
47 Nothing.
48 """
49
50 self.name = name
51 self.url_home = url_home
52 self.url_username_format = url_username_format
53
54 self.username_claimed = username_claimed
55 self.username_unclaimed = secrets.token_urlsafe(32)
56 self.information = information
57 self.is_nsfw = is_nsfw
58
59 return
60
61 def __str__(self):
62 """Convert Object To String.
63
64 Keyword Arguments:
65 self -- This object.
66
67 Return Value:
68 Nicely formatted string to get information about this object.
69 """
70
71 return f"{self.name} ({self.url_home})"
72
73
74 class SitesInformation:
75 def __init__(self, data_file_path=None):
76 """Create Sites Information Object.
77
78 Contains information about all supported websites.
79
80 Keyword Arguments:
81 self -- This object.
82 data_file_path -- String which indicates path to data file.
83 The file name must end in ".json".
84
85 There are 3 possible formats:
86 * Absolute File Format
87 For example, "c:/stuff/data.json".
88 * Relative File Format
89 The current working directory is used
90 as the context.
91 For example, "data.json".
92 * URL Format
93 For example,
94 "https://example.com/data.json", or
95 "http://example.com/data.json".
96
97 An exception will be thrown if the path
98 to the data file is not in the expected
99 format, or if there was any problem loading
100 the file.
101
102 If this option is not specified, then a
103 default site list will be used.
104
105 Return Value:
106 Nothing.
107 """
108
109 if not data_file_path:
110 # The default data file is the live data.json which is in the GitHub repo. The reason why we are using
111 # this instead of the local one is so that the user has the most up-to-date data. This prevents
112 # users from creating issue about false positives which has already been fixed or having outdated data
113 data_file_path = "https://raw.githubusercontent.com/sherlock-project/sherlock/master/sherlock/resources/data.json"
114
115 # Ensure that specified data file has correct extension.
116 if not data_file_path.lower().endswith(".json"):
117 raise FileNotFoundError(f"Incorrect JSON file extension for data file '{data_file_path}'.")
118
119 # if "http://" == data_file_path[:7].lower() or "https://" == data_file_path[:8].lower():
120 if data_file_path.lower().startswith("http"):
121 # Reference is to a URL.
122 try:
123 response = requests.get(url=data_file_path)
124 except Exception as error:
125 raise FileNotFoundError(
126 f"Problem while attempting to access data file URL '{data_file_path}': {error}"
127 )
128
129 if response.status_code != 200:
130 raise FileNotFoundError(f"Bad response while accessing "
131 f"data file URL '{data_file_path}'."
132 )
133 try:
134 site_data = response.json()
135 except Exception as error:
136 raise ValueError(
137 f"Problem parsing json contents at '{data_file_path}': {error}."
138 )
139
140 else:
141 # Reference is to a file.
142 try:
143 with open(data_file_path, "r", encoding="utf-8") as file:
144 try:
145 site_data = json.load(file)
146 except Exception as error:
147 raise ValueError(
148 f"Problem parsing json contents at '{data_file_path}': {error}."
149 )
150
151 except FileNotFoundError:
152 raise FileNotFoundError(f"Problem while attempting to access "
153 f"data file '{data_file_path}'."
154 )
155
156 try:
157 site_data.pop('$schema')
158 except:
159 pass
160
161 self.sites = {}
162
163 # Add all site information from the json file to internal site list.
164 for site_name in site_data:
165 try:
166
167 self.sites[site_name] = \
168 SiteInformation(site_name,
169 site_data[site_name]["urlMain"],
170 site_data[site_name]["url"],
171 site_data[site_name]["username_claimed"],
172 site_data[site_name],
173 site_data[site_name].get("isNSFW",False)
174
175 )
176 except KeyError as error:
177 raise ValueError(
178 f"Problem parsing json contents at '{data_file_path}': Missing attribute {error}."
179 )
180
181 return
182
183 def remove_nsfw_sites(self):
184 """
185 Remove NSFW sites from the sites, if isNSFW flag is true for site
186
187 Keyword Arguments:
188 self -- This object.
189
190 Return Value:
191 None
192 """
193 sites = {}
194 for site in self.sites:
195 if self.sites[site].is_nsfw:
196 continue
197 sites[site] = self.sites[site]
198 self.sites = sites
199
200 def site_name_list(self):
201 """Get Site Name List.
202
203 Keyword Arguments:
204 self -- This object.
205
206 Return Value:
207 List of strings containing names of sites.
208 """
209
210 return sorted([site.name for site in self], key=str.lower)
211
212 def __iter__(self):
213 """Iterator For Object.
214
215 Keyword Arguments:
216 self -- This object.
217
218 Return Value:
219 Iterator for sites object.
220 """
221
222 for site_name in self.sites:
223 yield self.sites[site_name]
224
225 def __len__(self):
226 """Length For Object.
227
228 Keyword Arguments:
229 self -- This object.
230
231 Return Value:
232 Length of sites object.
233 """
234 return len(self.sites)
235
[end of sherlock/sites.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sherlock/sites.py b/sherlock/sites.py
--- a/sherlock/sites.py
+++ b/sherlock/sites.py
@@ -177,6 +177,8 @@
raise ValueError(
f"Problem parsing json contents at '{data_file_path}': Missing attribute {error}."
)
+ except TypeError as error:
+ print(f"Encountered TypeError parsing json contents for target '{site_name}' at {data_file_path}\nSkipping target.\n")
return
| {"golden_diff": "diff --git a/sherlock/sites.py b/sherlock/sites.py\n--- a/sherlock/sites.py\n+++ b/sherlock/sites.py\n@@ -177,6 +177,8 @@\n raise ValueError(\n f\"Problem parsing json contents at '{data_file_path}': Missing attribute {error}.\"\n )\n+ except TypeError as error:\n+ print(f\"Encountered TypeError parsing json contents for target '{site_name}' at {data_file_path}\\nSkipping target.\\n\")\n \n return\n", "issue": "SHERLOCK// ERROR \"string indices must be integers, not 'str' \"\ncomo puedo solucionar este error que me sale al querer usar sherlock, no se como solucionarlo la verdad\n", "before_files": [{"content": "\"\"\"Sherlock Sites Information Module\n\nThis module supports storing information about websites.\nThis is the raw data that will be used to search for usernames.\n\"\"\"\nimport json\nimport requests\nimport secrets\n\nclass SiteInformation:\n def __init__(self, name, url_home, url_username_format, username_claimed,\n information, is_nsfw, username_unclaimed=secrets.token_urlsafe(10)):\n \"\"\"Create Site Information Object.\n\n Contains information about a specific website.\n\n Keyword Arguments:\n self -- This object.\n name -- String which identifies site.\n url_home -- String containing URL for home of site.\n url_username_format -- String containing URL for Username format\n on site.\n NOTE: The string should contain the\n token \"{}\" where the username should\n be substituted. For example, a string\n of \"https://somesite.com/users/{}\"\n indicates that the individual\n usernames would show up under the\n \"https://somesite.com/users/\" area of\n the website.\n username_claimed -- String containing username which is known\n to be claimed on website.\n username_unclaimed -- String containing username which is known\n to be unclaimed on website.\n information -- Dictionary containing all known information\n about website.\n NOTE: Custom information about how to\n actually detect the existence of the\n username will be included in this\n dictionary. 
This information will\n be needed by the detection method,\n but it is only recorded in this\n object for future use.\n is_nsfw -- Boolean indicating if site is Not Safe For Work.\n\n Return Value:\n Nothing.\n \"\"\"\n\n self.name = name\n self.url_home = url_home\n self.url_username_format = url_username_format\n\n self.username_claimed = username_claimed\n self.username_unclaimed = secrets.token_urlsafe(32)\n self.information = information\n self.is_nsfw = is_nsfw\n\n return\n\n def __str__(self):\n \"\"\"Convert Object To String.\n\n Keyword Arguments:\n self -- This object.\n\n Return Value:\n Nicely formatted string to get information about this object.\n \"\"\"\n \n return f\"{self.name} ({self.url_home})\"\n\n\nclass SitesInformation:\n def __init__(self, data_file_path=None):\n \"\"\"Create Sites Information Object.\n\n Contains information about all supported websites.\n\n Keyword Arguments:\n self -- This object.\n data_file_path -- String which indicates path to data file.\n The file name must end in \".json\".\n\n There are 3 possible formats:\n * Absolute File Format\n For example, \"c:/stuff/data.json\".\n * Relative File Format\n The current working directory is used\n as the context.\n For example, \"data.json\".\n * URL Format\n For example,\n \"https://example.com/data.json\", or\n \"http://example.com/data.json\".\n\n An exception will be thrown if the path\n to the data file is not in the expected\n format, or if there was any problem loading\n the file.\n\n If this option is not specified, then a\n default site list will be used.\n\n Return Value:\n Nothing.\n \"\"\"\n\n if not data_file_path:\n # The default data file is the live data.json which is in the GitHub repo. The reason why we are using\n # this instead of the local one is so that the user has the most up-to-date data. 
This prevents\n # users from creating issue about false positives which has already been fixed or having outdated data\n data_file_path = \"https://raw.githubusercontent.com/sherlock-project/sherlock/master/sherlock/resources/data.json\"\n\n # Ensure that specified data file has correct extension.\n if not data_file_path.lower().endswith(\".json\"):\n raise FileNotFoundError(f\"Incorrect JSON file extension for data file '{data_file_path}'.\")\n\n # if \"http://\" == data_file_path[:7].lower() or \"https://\" == data_file_path[:8].lower():\n if data_file_path.lower().startswith(\"http\"):\n # Reference is to a URL.\n try:\n response = requests.get(url=data_file_path)\n except Exception as error:\n raise FileNotFoundError(\n f\"Problem while attempting to access data file URL '{data_file_path}': {error}\"\n )\n\n if response.status_code != 200:\n raise FileNotFoundError(f\"Bad response while accessing \"\n f\"data file URL '{data_file_path}'.\"\n )\n try:\n site_data = response.json()\n except Exception as error:\n raise ValueError(\n f\"Problem parsing json contents at '{data_file_path}': {error}.\"\n )\n\n else:\n # Reference is to a file.\n try:\n with open(data_file_path, \"r\", encoding=\"utf-8\") as file:\n try:\n site_data = json.load(file)\n except Exception as error:\n raise ValueError(\n f\"Problem parsing json contents at '{data_file_path}': {error}.\"\n )\n\n except FileNotFoundError:\n raise FileNotFoundError(f\"Problem while attempting to access \"\n f\"data file '{data_file_path}'.\"\n )\n \n try:\n site_data.pop('$schema')\n except:\n pass\n\n self.sites = {}\n\n # Add all site information from the json file to internal site list.\n for site_name in site_data:\n try:\n\n self.sites[site_name] = \\\n SiteInformation(site_name,\n site_data[site_name][\"urlMain\"],\n site_data[site_name][\"url\"],\n site_data[site_name][\"username_claimed\"],\n site_data[site_name],\n site_data[site_name].get(\"isNSFW\",False)\n\n )\n except KeyError as error:\n raise ValueError(\n f\"Problem parsing json contents at '{data_file_path}': Missing attribute {error}.\"\n )\n\n return\n\n def remove_nsfw_sites(self):\n \"\"\"\n Remove NSFW sites from the sites, if isNSFW flag is true for site\n\n Keyword Arguments:\n self -- This object.\n\n Return Value:\n None\n \"\"\"\n sites = {}\n for site in self.sites:\n if self.sites[site].is_nsfw:\n continue\n sites[site] = self.sites[site] \n self.sites = sites\n\n def site_name_list(self):\n \"\"\"Get Site Name List.\n\n Keyword Arguments:\n self -- This object.\n\n Return Value:\n List of strings containing names of sites.\n \"\"\"\n\n return sorted([site.name for site in self], key=str.lower)\n\n def __iter__(self):\n \"\"\"Iterator For Object.\n\n Keyword Arguments:\n self -- This object.\n\n Return Value:\n Iterator for sites object.\n \"\"\"\n\n for site_name in self.sites:\n yield self.sites[site_name]\n\n def __len__(self):\n \"\"\"Length For Object.\n\n Keyword Arguments:\n self -- This object.\n\n Return Value:\n Length of sites object.\n \"\"\"\n return len(self.sites)\n", "path": "sherlock/sites.py"}]} | 2,737 | 114 |
gh_patches_debug_28686 | rasdani/github-patches | git_diff | fossasia__open-event-server-4019 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Method to insert session-speaker relationship missing
**I'm submitting a ...** (check one with "x")
- [x] bug report
- [ ] feature request
- [ ] support request => Please do not submit support requests here, instead ask your query in our Gitter channel at https://gitter.im/fossasia/open-event-orga-server
**Current behavior:**
<!-- Describe how the bug manifests. -->
**Expected behavior:**
Session must be updated with related speakers and data inserted in speakers-session table.
**Steps to reproduce:**
<!-- If you are able to illustrate the bug or feature request with an example, please provide steps to reproduce -->
**Related code:**
```
insert any relevant code here else remove this section
```
**Other information:**
<!-- List any other information that is relevant to your issue. Stack traces, related issues, suggestions on how to fix, Stack Overflow links, forum links, etc. -->
**System information:**
</issue>
<code>
[start of app/api/speakers.py]
1 from marshmallow_jsonapi import fields
2 from marshmallow_jsonapi.flask import Schema, Relationship
3 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
4
5 from app.api.helpers.utilities import dasherize
6 from app.api.helpers.permissions import jwt_required, current_identity
7 from app.models import db
8 from app.models.speaker import Speaker
9 from app.models.session import Session
10 from app.models.user import User
11 from app.models.event import Event
12 from app.api.helpers.db import safe_query
13 from app.api.bootstrap import api
14
15
16 class SpeakerSchema(Schema):
17 """
18 Speaker Schema based on Speaker Model
19 """
20 class Meta:
21 """
22 Meta class for speaker schema
23 """
24 type_ = 'speaker'
25 self_view = 'v1.speaker_detail'
26 self_view_kwargs = {'id': '<id>'}
27 inflect = dasherize
28
29 id = fields.Str(dump_only=True)
30 name = fields.Str(required=True)
31 email = fields.Str(required=True)
32 photo_url = fields.Url(allow_none=True)
33 thumbnail_image_url = fields.Url(allow_none=True)
34 small_image_url = fields.Url(allow_none=True)
35 icon_image_url = fields.Url(allow_none=True)
36 short_biography = fields.Str(allow_none=True)
37 long_biography = fields.Str(allow_none=True)
38 speaking_experience = fields.Str(allow_none=True)
39 mobile = fields.Str(allow_none=True)
40 website = fields.Url(allow_none=True)
41 twitter = fields.Url(allow_none=True)
42 facebook = fields.Url(allow_none=True)
43 github = fields.Url(allow_none=True)
44 linkedin = fields.Url(allow_none=True)
45 organisation = fields.Str(allow_none=True)
46 is_featured = fields.Boolean(default=False)
47 position = fields.Str(allow_none=True)
48 country = fields.Str(allow_none=True)
49 city = fields.Str(allow_none=True)
50 gender = fields.Str(allow_none=True)
51 heard_from = fields.Str(allow_none=True)
52 sponsorship_required = fields.Str(allow_none=True)
53 event = Relationship(attribute='event',
54 self_view='v1.speaker_event',
55 self_view_kwargs={'id': '<id>'},
56 related_view='v1.event_detail',
57 related_view_kwargs={'speaker_id': '<id>'},
58 schema='EventSchema',
59 type_='event')
60 user = Relationship(attribute='user',
61 self_view='v1.speaker_user',
62 self_view_kwargs={'id': '<id>'},
63 related_view='v1.user_detail',
64 related_view_kwargs={'speaker_id': '<id>'},
65 schema='UserSchema',
66 type_='user')
67 sessions = Relationship(attribute='sessions',
68 self_view='v1.speaker_session',
69 self_view_kwargs={'id': '<id>'},
70 related_view='v1.session_list',
71 related_view_kwargs={'speaker_id': '<id>'},
72 schema='SessionSchema',
73 type_='session')
74
75
76 class SpeakerList(ResourceList):
77 """
78 List and create speakers
79 """
80 def query(self, view_kwargs):
81 """
82 query method for speakers list class
83 :param view_kwargs:
84 :return:
85 """
86 query_ = self.session.query(Speaker)
87 if view_kwargs.get('event_id'):
88 event = safe_query(self, Event, 'id', view_kwargs['event_id'], 'event_id')
89 query_ = query_.join(Event).filter(Event.id == event.id)
90 elif view_kwargs.get('event_identifier'):
91 event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier')
92 query_ = query_.join(Event).filter(Event.id == event.id)
93 if view_kwargs.get('user_id'):
94 user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')
95 query_ = query_.join(User).filter(User.id == user.id)
96 if view_kwargs.get('session_id'):
97 session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')
98 # session-speaker :: many-to-many relationship
99 query_ = Speaker.query.filter(Speaker.sessions.any(id=session.id))
100 return query_
101
102 def before_post(self, args, kwargs, data):
103 """
104 method to add user_id to view_kwargs before post
105 :param args:
106 :param kwargs:
107 :param data:
108 :return:
109 """
110 kwargs['user_id'] = current_identity.id
111
112 def before_create_object(self, data, view_kwargs):
113 """
114 before create object method for speaker list class
115 :param data:
116 :param view_kwargs:
117 :return:
118 """
119 if view_kwargs.get('event_id'):
120 event = safe_query(self, Event, 'id', view_kwargs['event_id'], 'event_id')
121 data['event_id'] = event.id
122 elif view_kwargs.get('event_identifier'):
123 event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier')
124 data['event_id'] = event.id
125 data['user_id'] = current_identity.id
126
127 view_kwargs = True
128 decorators = (api.has_permission('accessible_role_based_events'),)
129 schema = SpeakerSchema
130 data_layer = {'session': db.session,
131 'model': Speaker,
132 'methods': {
133 'query': query,
134 'before_create_object': before_create_object,
135 'before_post': before_post
136 }}
137
138
139 class SpeakerDetail(ResourceDetail):
140 """
141 Speakers Detail by id
142 """
143 decorators = (api.has_permission('is_coorganizer_or_user_itself', methods="PATCH,DELETE", fetch="event_id",
144 fetch_as="event_id", model=Speaker, check=lambda a: a.get('id') is not None),)
145 schema = SpeakerSchema
146 data_layer = {'session': db.session,
147 'model': Speaker}
148
149
150 class SpeakerRelationship(ResourceRelationship):
151 """
152 Speaker Relationship class
153 """
154 decorators = (jwt_required,)
155 schema = SpeakerSchema
156 data_layer = {'session': db.session,
157 'model': Speaker}
158
[end of app/api/speakers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/api/speakers.py b/app/api/speakers.py
--- a/app/api/speakers.py
+++ b/app/api/speakers.py
@@ -116,6 +116,9 @@
:param view_kwargs:
:return:
"""
+ if view_kwargs.get('session_id'):
+ session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')
+ data['event_id'] = session.event_id
if view_kwargs.get('event_id'):
event = safe_query(self, Event, 'id', view_kwargs['event_id'], 'event_id')
data['event_id'] = event.id
@@ -124,6 +127,12 @@
data['event_id'] = event.id
data['user_id'] = current_identity.id
+ def after_create_object(self, obj, data, view_kwargs):
+ if view_kwargs.get('session_id'):
+ session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')
+ session.speakers.append(obj)
+ self.session.commit()
+
view_kwargs = True
decorators = (api.has_permission('accessible_role_based_events'),)
schema = SpeakerSchema
@@ -132,7 +141,8 @@
'methods': {
'query': query,
'before_create_object': before_create_object,
- 'before_post': before_post
+ 'before_post': before_post,
+ 'after_create_object': after_create_object
}}
| {"golden_diff": "diff --git a/app/api/speakers.py b/app/api/speakers.py\n--- a/app/api/speakers.py\n+++ b/app/api/speakers.py\n@@ -116,6 +116,9 @@\n :param view_kwargs:\n :return:\n \"\"\"\n+ if view_kwargs.get('session_id'):\n+ session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')\n+ data['event_id'] = session.event_id\n if view_kwargs.get('event_id'):\n event = safe_query(self, Event, 'id', view_kwargs['event_id'], 'event_id')\n data['event_id'] = event.id\n@@ -124,6 +127,12 @@\n data['event_id'] = event.id\n data['user_id'] = current_identity.id\n \n+ def after_create_object(self, obj, data, view_kwargs):\n+ if view_kwargs.get('session_id'):\n+ session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')\n+ session.speakers.append(obj)\n+ self.session.commit()\n+\n view_kwargs = True\n decorators = (api.has_permission('accessible_role_based_events'),)\n schema = SpeakerSchema\n@@ -132,7 +141,8 @@\n 'methods': {\n 'query': query,\n 'before_create_object': before_create_object,\n- 'before_post': before_post\n+ 'before_post': before_post,\n+ 'after_create_object': after_create_object\n }}\n", "issue": "Method to insert session-speaker relationship missing\n**I'm submitting a ...** (check one with \"x\")\r\n- [x] bug report\r\n- [ ] feature request\r\n- [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-orga-server\r\n\r\n**Current behavior:**\r\n<!-- Describe how the bug manifests. -->\r\n\r\n**Expected behavior:**\r\nSession must be updated with related speakers and data inserted in speakers-session table. \r\n\r\n**Steps to reproduce:**\r\n<!-- If you are able to illustrate the bug or feature request with an example, please provide steps to reproduce -->\r\n\r\n**Related code:**\r\n\r\n```\r\ninsert any relevant code here else remove this section\r\n```\r\n\r\n**Other information:**\r\n<!-- List any other information that is relevant to your issue. Stack traces, related issues, suggestions on how to fix, Stack Overflow links, forum links, etc. 
-->\r\n\r\n**System information:** \r\n\r\n\r\n\n", "before_files": [{"content": "from marshmallow_jsonapi import fields\nfrom marshmallow_jsonapi.flask import Schema, Relationship\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\n\nfrom app.api.helpers.utilities import dasherize\nfrom app.api.helpers.permissions import jwt_required, current_identity\nfrom app.models import db\nfrom app.models.speaker import Speaker\nfrom app.models.session import Session\nfrom app.models.user import User\nfrom app.models.event import Event\nfrom app.api.helpers.db import safe_query\nfrom app.api.bootstrap import api\n\n\nclass SpeakerSchema(Schema):\n \"\"\"\n Speaker Schema based on Speaker Model\n \"\"\"\n class Meta:\n \"\"\"\n Meta class for speaker schema\n \"\"\"\n type_ = 'speaker'\n self_view = 'v1.speaker_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n id = fields.Str(dump_only=True)\n name = fields.Str(required=True)\n email = fields.Str(required=True)\n photo_url = fields.Url(allow_none=True)\n thumbnail_image_url = fields.Url(allow_none=True)\n small_image_url = fields.Url(allow_none=True)\n icon_image_url = fields.Url(allow_none=True)\n short_biography = fields.Str(allow_none=True)\n long_biography = fields.Str(allow_none=True)\n speaking_experience = fields.Str(allow_none=True)\n mobile = fields.Str(allow_none=True)\n website = fields.Url(allow_none=True)\n twitter = fields.Url(allow_none=True)\n facebook = fields.Url(allow_none=True)\n github = fields.Url(allow_none=True)\n linkedin = fields.Url(allow_none=True)\n organisation = fields.Str(allow_none=True)\n is_featured = fields.Boolean(default=False)\n position = fields.Str(allow_none=True)\n country = fields.Str(allow_none=True)\n city = fields.Str(allow_none=True)\n gender = fields.Str(allow_none=True)\n heard_from = fields.Str(allow_none=True)\n sponsorship_required = fields.Str(allow_none=True)\n event = Relationship(attribute='event',\n self_view='v1.speaker_event',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.event_detail',\n related_view_kwargs={'speaker_id': '<id>'},\n schema='EventSchema',\n type_='event')\n user = Relationship(attribute='user',\n self_view='v1.speaker_user',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.user_detail',\n related_view_kwargs={'speaker_id': '<id>'},\n schema='UserSchema',\n type_='user')\n sessions = Relationship(attribute='sessions',\n self_view='v1.speaker_session',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.session_list',\n related_view_kwargs={'speaker_id': '<id>'},\n schema='SessionSchema',\n type_='session')\n\n\nclass SpeakerList(ResourceList):\n \"\"\"\n List and create speakers\n \"\"\"\n def query(self, view_kwargs):\n \"\"\"\n query method for speakers list class\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(Speaker)\n if view_kwargs.get('event_id'):\n event = safe_query(self, Event, 'id', view_kwargs['event_id'], 'event_id')\n query_ = query_.join(Event).filter(Event.id == event.id)\n elif view_kwargs.get('event_identifier'):\n event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier')\n query_ = query_.join(Event).filter(Event.id == event.id)\n if view_kwargs.get('user_id'):\n user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')\n query_ = query_.join(User).filter(User.id == user.id)\n if view_kwargs.get('session_id'):\n session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')\n # session-speaker 
:: many-to-many relationship\n query_ = Speaker.query.filter(Speaker.sessions.any(id=session.id))\n return query_\n\n def before_post(self, args, kwargs, data):\n \"\"\"\n method to add user_id to view_kwargs before post\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n kwargs['user_id'] = current_identity.id\n\n def before_create_object(self, data, view_kwargs):\n \"\"\"\n before create object method for speaker list class\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('event_id'):\n event = safe_query(self, Event, 'id', view_kwargs['event_id'], 'event_id')\n data['event_id'] = event.id\n elif view_kwargs.get('event_identifier'):\n event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier')\n data['event_id'] = event.id\n data['user_id'] = current_identity.id\n\n view_kwargs = True\n decorators = (api.has_permission('accessible_role_based_events'),)\n schema = SpeakerSchema\n data_layer = {'session': db.session,\n 'model': Speaker,\n 'methods': {\n 'query': query,\n 'before_create_object': before_create_object,\n 'before_post': before_post\n }}\n\n\nclass SpeakerDetail(ResourceDetail):\n \"\"\"\n Speakers Detail by id\n \"\"\"\n decorators = (api.has_permission('is_coorganizer_or_user_itself', methods=\"PATCH,DELETE\", fetch=\"event_id\",\n fetch_as=\"event_id\", model=Speaker, check=lambda a: a.get('id') is not None),)\n schema = SpeakerSchema\n data_layer = {'session': db.session,\n 'model': Speaker}\n\n\nclass SpeakerRelationship(ResourceRelationship):\n \"\"\"\n Speaker Relationship class\n \"\"\"\n decorators = (jwt_required,)\n schema = SpeakerSchema\n data_layer = {'session': db.session,\n 'model': Speaker}\n", "path": "app/api/speakers.py"}]} | 2,388 | 340 |
gh_patches_debug_30038 | rasdani/github-patches | git_diff | frappe__frappe-21064 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Proposal] Add Parameters to Webhook Request URL
We use webhooks to connect erpnext to other apps and systems.
Most REST APIs contain some IDs in their URL. Even Frappe does (`/api/resource/{{doctype}}/{{docname}}`).
AFAIK, there is no way to dynamically set the Webhook request URL with contents of the document.
I propose to render the webhook-url as a template, much the same as the json body is. This would allow to call endpoints like
`http://example.com/api/{{doc.some_field}}`
</issue>
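A rough sketch of what the proposal could look like, reusing the same Jinja-style rendering the JSON body already goes through (the helper name below is illustrative, not an existing API):

```python
import frappe

def build_request_url(webhook, doc):
    # Render e.g. "http://example.com/api/{{ doc.some_field }}" against the document,
    # mirroring how webhook_json is rendered before the request is sent.
    context = {"doc": doc, "utils": frappe.utils}
    return frappe.render_template(webhook.request_url, context)
```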
<code>
[start of frappe/integrations/doctype/webhook/webhook.py]
1 # Copyright (c) 2017, Frappe Technologies and contributors
2 # License: MIT. See LICENSE
3
4 import base64
5 import hashlib
6 import hmac
7 import json
8 from time import sleep
9 from urllib.parse import urlparse
10
11 import requests
12
13 import frappe
14 from frappe import _
15 from frappe.model.document import Document
16 from frappe.utils.jinja import validate_template
17 from frappe.utils.safe_exec import get_safe_globals
18
19 WEBHOOK_SECRET_HEADER = "X-Frappe-Webhook-Signature"
20
21
22 class Webhook(Document):
23 def validate(self):
24 self.validate_docevent()
25 self.validate_condition()
26 self.validate_request_url()
27 self.validate_request_body()
28 self.validate_repeating_fields()
29 self.preview_document = None
30
31 def on_update(self):
32 frappe.cache().delete_value("webhooks")
33
34 def validate_docevent(self):
35 if self.webhook_doctype:
36 is_submittable = frappe.get_value("DocType", self.webhook_doctype, "is_submittable")
37 if not is_submittable and self.webhook_docevent in [
38 "on_submit",
39 "on_cancel",
40 "on_update_after_submit",
41 ]:
42 frappe.throw(_("DocType must be Submittable for the selected Doc Event"))
43
44 def validate_condition(self):
45 temp_doc = frappe.new_doc(self.webhook_doctype)
46 if self.condition:
47 try:
48 frappe.safe_eval(self.condition, eval_locals=get_context(temp_doc))
49 except Exception as e:
50 frappe.throw(_("Invalid Condition: {}").format(e))
51
52 def validate_request_url(self):
53 try:
54 request_url = urlparse(self.request_url).netloc
55 if not request_url:
56 raise frappe.ValidationError
57 except Exception as e:
58 frappe.throw(_("Check Request URL"), exc=e)
59
60 def validate_request_body(self):
61 if self.request_structure:
62 if self.request_structure == "Form URL-Encoded":
63 self.webhook_json = None
64 elif self.request_structure == "JSON":
65 validate_template(self.webhook_json)
66 self.webhook_data = []
67
68 def validate_repeating_fields(self):
69 """Error when Same Field is entered multiple times in webhook_data"""
70 webhook_data = []
71 for entry in self.webhook_data:
72 webhook_data.append(entry.fieldname)
73
74 if len(webhook_data) != len(set(webhook_data)):
75 frappe.throw(_("Same Field is entered more than once"))
76
77 @frappe.whitelist()
78 def generate_preview(self):
79 # This function doesn't need to do anything specific as virtual fields
80 # get evaluated automatically.
81 pass
82
83 @property
84 def meets_condition(self):
85 if not self.condition:
86 return _("Yes")
87
88 if not (self.preview_document and self.webhook_doctype):
89 return _("Select a document to check if it meets conditions.")
90
91 try:
92 doc = frappe.get_cached_doc(self.webhook_doctype, self.preview_document)
93 met_condition = frappe.safe_eval(self.condition, eval_locals=get_context(doc))
94 except Exception as e:
95 return _("Failed to evaluate conditions: {}").format(e)
96 return _("Yes") if met_condition else _("No")
97
98 @property
99 def preview_request_body(self):
100 if not (self.preview_document and self.webhook_doctype):
101 return _("Select a document to preview request data")
102
103 try:
104 doc = frappe.get_cached_doc(self.webhook_doctype, self.preview_document)
105 return frappe.as_json(get_webhook_data(doc, self))
106 except Exception as e:
107 return _("Failed to compute request body: {}").format(e)
108
109
110 def get_context(doc):
111 return {"doc": doc, "utils": get_safe_globals().get("frappe").get("utils")}
112
113
114 def enqueue_webhook(doc, webhook) -> None:
115 webhook: Webhook = frappe.get_doc("Webhook", webhook.get("name"))
116 headers = get_webhook_headers(doc, webhook)
117 data = get_webhook_data(doc, webhook)
118 r = None
119
120 for i in range(3):
121 try:
122 r = requests.request(
123 method=webhook.request_method,
124 url=webhook.request_url,
125 data=json.dumps(data, default=str),
126 headers=headers,
127 timeout=5,
128 )
129 r.raise_for_status()
130 frappe.logger().debug({"webhook_success": r.text})
131 log_request(webhook.name, doc.name, webhook.request_url, headers, data, r)
132 break
133
134 except requests.exceptions.ReadTimeout as e:
135 frappe.logger().debug({"webhook_error": e, "try": i + 1})
136 log_request(webhook.name, doc.name, webhook.request_url, headers, data)
137
138 except Exception as e:
139 frappe.logger().debug({"webhook_error": e, "try": i + 1})
140 log_request(webhook.name, doc.name, webhook.request_url, headers, data, r)
141 sleep(3 * i + 1)
142 if i != 2:
143 continue
144
145
146 def log_request(
147 webhook: str,
148 docname: str,
149 url: str,
150 headers: dict,
151 data: dict,
152 res: requests.Response | None = None,
153 ):
154 request_log = frappe.get_doc(
155 {
156 "doctype": "Webhook Request Log",
157 "webhook": webhook,
158 "reference_document": docname,
159 "user": frappe.session.user if frappe.session.user else None,
160 "url": url,
161 "headers": frappe.as_json(headers) if headers else None,
162 "data": frappe.as_json(data) if data else None,
163 "response": res and res.text,
164 "error": frappe.get_traceback(),
165 }
166 )
167
168 request_log.save(ignore_permissions=True)
169
170
171 def get_webhook_headers(doc, webhook):
172 headers = {}
173
174 if webhook.enable_security:
175 data = get_webhook_data(doc, webhook)
176 signature = base64.b64encode(
177 hmac.new(
178 webhook.get_password("webhook_secret").encode("utf8"),
179 json.dumps(data).encode("utf8"),
180 hashlib.sha256,
181 ).digest()
182 )
183 headers[WEBHOOK_SECRET_HEADER] = signature
184
185 if webhook.webhook_headers:
186 for h in webhook.webhook_headers:
187 if h.get("key") and h.get("value"):
188 headers[h.get("key")] = h.get("value")
189
190 return headers
191
192
193 def get_webhook_data(doc, webhook):
194 data = {}
195 doc = doc.as_dict(convert_dates_to_str=True)
196
197 if webhook.webhook_data:
198 data = {w.key: doc.get(w.fieldname) for w in webhook.webhook_data}
199 elif webhook.webhook_json:
200 data = frappe.render_template(webhook.webhook_json, get_context(doc))
201 data = json.loads(data)
202
203 return data
204
[end of frappe/integrations/doctype/webhook/webhook.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/frappe/integrations/doctype/webhook/webhook.py b/frappe/integrations/doctype/webhook/webhook.py
--- a/frappe/integrations/doctype/webhook/webhook.py
+++ b/frappe/integrations/doctype/webhook/webhook.py
@@ -115,29 +115,34 @@
webhook: Webhook = frappe.get_doc("Webhook", webhook.get("name"))
headers = get_webhook_headers(doc, webhook)
data = get_webhook_data(doc, webhook)
- r = None
+ if webhook.is_dynamic_url:
+ request_url = frappe.render_template(webhook.request_url, get_context(doc))
+ else:
+ request_url = webhook.request_url
+
+ r = None
for i in range(3):
try:
r = requests.request(
method=webhook.request_method,
- url=webhook.request_url,
+ url=request_url,
data=json.dumps(data, default=str),
headers=headers,
timeout=5,
)
r.raise_for_status()
frappe.logger().debug({"webhook_success": r.text})
- log_request(webhook.name, doc.name, webhook.request_url, headers, data, r)
+ log_request(webhook.name, doc.name, request_url, headers, data, r)
break
except requests.exceptions.ReadTimeout as e:
frappe.logger().debug({"webhook_error": e, "try": i + 1})
- log_request(webhook.name, doc.name, webhook.request_url, headers, data)
+ log_request(webhook.name, doc.name, request_url, headers, data)
except Exception as e:
frappe.logger().debug({"webhook_error": e, "try": i + 1})
- log_request(webhook.name, doc.name, webhook.request_url, headers, data, r)
+ log_request(webhook.name, doc.name, request_url, headers, data, r)
sleep(3 * i + 1)
if i != 2:
continue
| {"golden_diff": "diff --git a/frappe/integrations/doctype/webhook/webhook.py b/frappe/integrations/doctype/webhook/webhook.py\n--- a/frappe/integrations/doctype/webhook/webhook.py\n+++ b/frappe/integrations/doctype/webhook/webhook.py\n@@ -115,29 +115,34 @@\n \twebhook: Webhook = frappe.get_doc(\"Webhook\", webhook.get(\"name\"))\n \theaders = get_webhook_headers(doc, webhook)\n \tdata = get_webhook_data(doc, webhook)\n-\tr = None\n \n+\tif webhook.is_dynamic_url:\n+\t\trequest_url = frappe.render_template(webhook.request_url, get_context(doc))\n+\telse:\n+\t\trequest_url = webhook.request_url\n+\n+\tr = None\n \tfor i in range(3):\n \t\ttry:\n \t\t\tr = requests.request(\n \t\t\t\tmethod=webhook.request_method,\n-\t\t\t\turl=webhook.request_url,\n+\t\t\t\turl=request_url,\n \t\t\t\tdata=json.dumps(data, default=str),\n \t\t\t\theaders=headers,\n \t\t\t\ttimeout=5,\n \t\t\t)\n \t\t\tr.raise_for_status()\n \t\t\tfrappe.logger().debug({\"webhook_success\": r.text})\n-\t\t\tlog_request(webhook.name, doc.name, webhook.request_url, headers, data, r)\n+\t\t\tlog_request(webhook.name, doc.name, request_url, headers, data, r)\n \t\t\tbreak\n \n \t\texcept requests.exceptions.ReadTimeout as e:\n \t\t\tfrappe.logger().debug({\"webhook_error\": e, \"try\": i + 1})\n-\t\t\tlog_request(webhook.name, doc.name, webhook.request_url, headers, data)\n+\t\t\tlog_request(webhook.name, doc.name, request_url, headers, data)\n \n \t\texcept Exception as e:\n \t\t\tfrappe.logger().debug({\"webhook_error\": e, \"try\": i + 1})\n-\t\t\tlog_request(webhook.name, doc.name, webhook.request_url, headers, data, r)\n+\t\t\tlog_request(webhook.name, doc.name, request_url, headers, data, r)\n \t\t\tsleep(3 * i + 1)\n \t\t\tif i != 2:\n \t\t\t\tcontinue\n", "issue": "[Proposal] Add Parameters to Webhook Request URL\nWe use webhooks to connect erpnext to other apps and systems.\r\n\r\nMost REST-Apis contain some ids in their URL. Even Frappe does (`/api/resource/{{doctype}}/{{docname}}`)\r\n\r\nAFAIK, there is no way to dynamically set the Webhook request URL with contents of the document.\r\n\r\n\r\nI propose to render the webhook-url as a template, much the same as the json body is. This would allow to call endpoints like\r\n`http://example.com/api/{{doc.some_field}}`\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) 2017, Frappe Technologies and contributors\n# License: MIT. 
See LICENSE\n\nimport base64\nimport hashlib\nimport hmac\nimport json\nfrom time import sleep\nfrom urllib.parse import urlparse\n\nimport requests\n\nimport frappe\nfrom frappe import _\nfrom frappe.model.document import Document\nfrom frappe.utils.jinja import validate_template\nfrom frappe.utils.safe_exec import get_safe_globals\n\nWEBHOOK_SECRET_HEADER = \"X-Frappe-Webhook-Signature\"\n\n\nclass Webhook(Document):\n\tdef validate(self):\n\t\tself.validate_docevent()\n\t\tself.validate_condition()\n\t\tself.validate_request_url()\n\t\tself.validate_request_body()\n\t\tself.validate_repeating_fields()\n\t\tself.preview_document = None\n\n\tdef on_update(self):\n\t\tfrappe.cache().delete_value(\"webhooks\")\n\n\tdef validate_docevent(self):\n\t\tif self.webhook_doctype:\n\t\t\tis_submittable = frappe.get_value(\"DocType\", self.webhook_doctype, \"is_submittable\")\n\t\t\tif not is_submittable and self.webhook_docevent in [\n\t\t\t\t\"on_submit\",\n\t\t\t\t\"on_cancel\",\n\t\t\t\t\"on_update_after_submit\",\n\t\t\t]:\n\t\t\t\tfrappe.throw(_(\"DocType must be Submittable for the selected Doc Event\"))\n\n\tdef validate_condition(self):\n\t\ttemp_doc = frappe.new_doc(self.webhook_doctype)\n\t\tif self.condition:\n\t\t\ttry:\n\t\t\t\tfrappe.safe_eval(self.condition, eval_locals=get_context(temp_doc))\n\t\t\texcept Exception as e:\n\t\t\t\tfrappe.throw(_(\"Invalid Condition: {}\").format(e))\n\n\tdef validate_request_url(self):\n\t\ttry:\n\t\t\trequest_url = urlparse(self.request_url).netloc\n\t\t\tif not request_url:\n\t\t\t\traise frappe.ValidationError\n\t\texcept Exception as e:\n\t\t\tfrappe.throw(_(\"Check Request URL\"), exc=e)\n\n\tdef validate_request_body(self):\n\t\tif self.request_structure:\n\t\t\tif self.request_structure == \"Form URL-Encoded\":\n\t\t\t\tself.webhook_json = None\n\t\t\telif self.request_structure == \"JSON\":\n\t\t\t\tvalidate_template(self.webhook_json)\n\t\t\t\tself.webhook_data = []\n\n\tdef validate_repeating_fields(self):\n\t\t\"\"\"Error when Same Field is entered multiple times in webhook_data\"\"\"\n\t\twebhook_data = []\n\t\tfor entry in self.webhook_data:\n\t\t\twebhook_data.append(entry.fieldname)\n\n\t\tif len(webhook_data) != len(set(webhook_data)):\n\t\t\tfrappe.throw(_(\"Same Field is entered more than once\"))\n\n\[email protected]()\n\tdef generate_preview(self):\n\t\t# This function doesn't need to do anything specific as virtual fields\n\t\t# get evaluated automatically.\n\t\tpass\n\n\t@property\n\tdef meets_condition(self):\n\t\tif not self.condition:\n\t\t\treturn _(\"Yes\")\n\n\t\tif not (self.preview_document and self.webhook_doctype):\n\t\t\treturn _(\"Select a document to check if it meets conditions.\")\n\n\t\ttry:\n\t\t\tdoc = frappe.get_cached_doc(self.webhook_doctype, self.preview_document)\n\t\t\tmet_condition = frappe.safe_eval(self.condition, eval_locals=get_context(doc))\n\t\texcept Exception as e:\n\t\t\treturn _(\"Failed to evaluate conditions: {}\").format(e)\n\t\treturn _(\"Yes\") if met_condition else _(\"No\")\n\n\t@property\n\tdef preview_request_body(self):\n\t\tif not (self.preview_document and self.webhook_doctype):\n\t\t\treturn _(\"Select a document to preview request data\")\n\n\t\ttry:\n\t\t\tdoc = frappe.get_cached_doc(self.webhook_doctype, self.preview_document)\n\t\t\treturn frappe.as_json(get_webhook_data(doc, self))\n\t\texcept Exception as e:\n\t\t\treturn _(\"Failed to compute request body: {}\").format(e)\n\n\ndef get_context(doc):\n\treturn {\"doc\": doc, \"utils\": 
get_safe_globals().get(\"frappe\").get(\"utils\")}\n\n\ndef enqueue_webhook(doc, webhook) -> None:\n\twebhook: Webhook = frappe.get_doc(\"Webhook\", webhook.get(\"name\"))\n\theaders = get_webhook_headers(doc, webhook)\n\tdata = get_webhook_data(doc, webhook)\n\tr = None\n\n\tfor i in range(3):\n\t\ttry:\n\t\t\tr = requests.request(\n\t\t\t\tmethod=webhook.request_method,\n\t\t\t\turl=webhook.request_url,\n\t\t\t\tdata=json.dumps(data, default=str),\n\t\t\t\theaders=headers,\n\t\t\t\ttimeout=5,\n\t\t\t)\n\t\t\tr.raise_for_status()\n\t\t\tfrappe.logger().debug({\"webhook_success\": r.text})\n\t\t\tlog_request(webhook.name, doc.name, webhook.request_url, headers, data, r)\n\t\t\tbreak\n\n\t\texcept requests.exceptions.ReadTimeout as e:\n\t\t\tfrappe.logger().debug({\"webhook_error\": e, \"try\": i + 1})\n\t\t\tlog_request(webhook.name, doc.name, webhook.request_url, headers, data)\n\n\t\texcept Exception as e:\n\t\t\tfrappe.logger().debug({\"webhook_error\": e, \"try\": i + 1})\n\t\t\tlog_request(webhook.name, doc.name, webhook.request_url, headers, data, r)\n\t\t\tsleep(3 * i + 1)\n\t\t\tif i != 2:\n\t\t\t\tcontinue\n\n\ndef log_request(\n\twebhook: str,\n\tdocname: str,\n\turl: str,\n\theaders: dict,\n\tdata: dict,\n\tres: requests.Response | None = None,\n):\n\trequest_log = frappe.get_doc(\n\t\t{\n\t\t\t\"doctype\": \"Webhook Request Log\",\n\t\t\t\"webhook\": webhook,\n\t\t\t\"reference_document\": docname,\n\t\t\t\"user\": frappe.session.user if frappe.session.user else None,\n\t\t\t\"url\": url,\n\t\t\t\"headers\": frappe.as_json(headers) if headers else None,\n\t\t\t\"data\": frappe.as_json(data) if data else None,\n\t\t\t\"response\": res and res.text,\n\t\t\t\"error\": frappe.get_traceback(),\n\t\t}\n\t)\n\n\trequest_log.save(ignore_permissions=True)\n\n\ndef get_webhook_headers(doc, webhook):\n\theaders = {}\n\n\tif webhook.enable_security:\n\t\tdata = get_webhook_data(doc, webhook)\n\t\tsignature = base64.b64encode(\n\t\t\thmac.new(\n\t\t\t\twebhook.get_password(\"webhook_secret\").encode(\"utf8\"),\n\t\t\t\tjson.dumps(data).encode(\"utf8\"),\n\t\t\t\thashlib.sha256,\n\t\t\t).digest()\n\t\t)\n\t\theaders[WEBHOOK_SECRET_HEADER] = signature\n\n\tif webhook.webhook_headers:\n\t\tfor h in webhook.webhook_headers:\n\t\t\tif h.get(\"key\") and h.get(\"value\"):\n\t\t\t\theaders[h.get(\"key\")] = h.get(\"value\")\n\n\treturn headers\n\n\ndef get_webhook_data(doc, webhook):\n\tdata = {}\n\tdoc = doc.as_dict(convert_dates_to_str=True)\n\n\tif webhook.webhook_data:\n\t\tdata = {w.key: doc.get(w.fieldname) for w in webhook.webhook_data}\n\telif webhook.webhook_json:\n\t\tdata = frappe.render_template(webhook.webhook_json, get_context(doc))\n\t\tdata = json.loads(data)\n\n\treturn data\n", "path": "frappe/integrations/doctype/webhook/webhook.py"}]} | 2,688 | 450 |
gh_patches_debug_21010 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-3169 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Condition Specified but with no condition passes linting but fails deploy
### CloudFormation Lint Version
0.83.1
### What operating system are you using?
mac/ubuntu
### Describe the bug
In a CFN template, if you specify the root-level item `Conditions` but have no conditions under it, the template passes cfn-lint but always fails on deploy.
### Expected behavior
cfn-lint should fail if there is a Conditions root level object but no array entries under it.
### Reproduction template
```
AWSTemplateFormatVersion: "2010-09-09"
Parameters:
myParam
Conditions:
Resources:
myTopic:
Type: AWS::SNS::Topic
Properties:
DisplayName: mytopic
TopicName: mytopic
```
</issue>
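One detail that explains the miss in the rule below: a bare `Conditions:` key with nothing under it is parsed as `None`, not as an empty mapping, so a plain truthiness check on it silently skips validation. A quick illustration with standard PyYAML, assuming cfn-lint's template loader treats an empty key the same way:

```python
import yaml

template = """\
Conditions:
Resources:
  myTopic:
    Type: AWS::SNS::Topic
"""

parsed = yaml.safe_load(template)
print(parsed["Conditions"])  # None -> a check like `if conditions:` is False, so no match is reported
```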
<code>
[start of src/cfnlint/rules/conditions/Configuration.py]
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5
6 from cfnlint.rules import CloudFormationLintRule, RuleMatch
7
8
9 class Configuration(CloudFormationLintRule):
10 """Check if Conditions are configured correctly"""
11
12 id = "E8001"
13 shortdesc = "Conditions have appropriate properties"
14 description = "Check if Conditions are properly configured"
15 source_url = "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/conditions-section-structure.html"
16 tags = ["conditions"]
17
18 condition_keys = [
19 "Condition",
20 "Fn::And",
21 "Fn::Equals",
22 "Fn::Not",
23 "Fn::Or",
24 ]
25
26 def match(self, cfn):
27 matches = []
28
29 conditions = cfn.template.get("Conditions", {})
30 if conditions:
31 for condname, condobj in conditions.items():
32 if not isinstance(condobj, dict):
33 message = "Condition {0} has invalid property"
34 matches.append(
35 RuleMatch(["Conditions", condname], message.format(condname))
36 )
37 else:
38 if len(condobj) != 1:
39 message = "Condition {0} has too many intrinsic conditions"
40 matches.append(
41 RuleMatch(
42 ["Conditions", condname], message.format(condname)
43 )
44 )
45 else:
46 for k, _ in condobj.items():
47 if k not in self.condition_keys:
48 message = "Condition {0} has invalid property {1}"
49 matches.append(
50 RuleMatch(
51 ["Conditions", condname] + [k],
52 message.format(condname, k),
53 )
54 )
55
56 return matches
57
[end of src/cfnlint/rules/conditions/Configuration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/rules/conditions/Configuration.py b/src/cfnlint/rules/conditions/Configuration.py
--- a/src/cfnlint/rules/conditions/Configuration.py
+++ b/src/cfnlint/rules/conditions/Configuration.py
@@ -26,8 +26,10 @@
def match(self, cfn):
matches = []
- conditions = cfn.template.get("Conditions", {})
- if conditions:
+ if "Conditions" not in cfn.template:
+ return matches
+ conditions = cfn.template.get("Conditions", None)
+ if isinstance(conditions, dict):
for condname, condobj in conditions.items():
if not isinstance(condobj, dict):
message = "Condition {0} has invalid property"
@@ -52,5 +54,12 @@
message.format(condname, k),
)
)
+ else:
+ matches.append(
+ RuleMatch(
+ ["Conditions"],
+ "Condition must be an object",
+ )
+ )
return matches
| {"golden_diff": "diff --git a/src/cfnlint/rules/conditions/Configuration.py b/src/cfnlint/rules/conditions/Configuration.py\n--- a/src/cfnlint/rules/conditions/Configuration.py\n+++ b/src/cfnlint/rules/conditions/Configuration.py\n@@ -26,8 +26,10 @@\n def match(self, cfn):\n matches = []\n \n- conditions = cfn.template.get(\"Conditions\", {})\n- if conditions:\n+ if \"Conditions\" not in cfn.template:\n+ return matches\n+ conditions = cfn.template.get(\"Conditions\", None)\n+ if isinstance(conditions, dict):\n for condname, condobj in conditions.items():\n if not isinstance(condobj, dict):\n message = \"Condition {0} has invalid property\"\n@@ -52,5 +54,12 @@\n message.format(condname, k),\n )\n )\n+ else:\n+ matches.append(\n+ RuleMatch(\n+ [\"Conditions\"],\n+ \"Condition must be an object\",\n+ )\n+ )\n \n return matches\n", "issue": "Condition Specified but with no condition passes linting but fails deploy\n### CloudFormation Lint Version\r\n\r\n0.83.1\r\n\r\n### What operating system are you using?\r\n\r\nmac/ubuntu\r\n\r\n### Describe the bug\r\n\r\nin a cfn template if you specify root level item `Conditions` but have no conditions this passes cfn-lint but always fails on deploy\r\n\r\n### Expected behavior\r\n\r\ncfn-lint should fail if there is a Conditions root level object but no array entries under it.\r\n\r\n### Reproduction template\r\n\r\n```\r\nAWSTemplateFormatVersion: \"2010-09-09\"\r\nParameters:\r\n myParam\r\nConditions:\r\nResources:\r\n myTopic:\r\n Type: AWS::SNS::Topic\r\n Properties:\r\n DisplayName: mytopic\r\n TopicName: mytopic\r\n```\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\n\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass Configuration(CloudFormationLintRule):\n \"\"\"Check if Conditions are configured correctly\"\"\"\n\n id = \"E8001\"\n shortdesc = \"Conditions have appropriate properties\"\n description = \"Check if Conditions are properly configured\"\n source_url = \"https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/conditions-section-structure.html\"\n tags = [\"conditions\"]\n\n condition_keys = [\n \"Condition\",\n \"Fn::And\",\n \"Fn::Equals\",\n \"Fn::Not\",\n \"Fn::Or\",\n ]\n\n def match(self, cfn):\n matches = []\n\n conditions = cfn.template.get(\"Conditions\", {})\n if conditions:\n for condname, condobj in conditions.items():\n if not isinstance(condobj, dict):\n message = \"Condition {0} has invalid property\"\n matches.append(\n RuleMatch([\"Conditions\", condname], message.format(condname))\n )\n else:\n if len(condobj) != 1:\n message = \"Condition {0} has too many intrinsic conditions\"\n matches.append(\n RuleMatch(\n [\"Conditions\", condname], message.format(condname)\n )\n )\n else:\n for k, _ in condobj.items():\n if k not in self.condition_keys:\n message = \"Condition {0} has invalid property {1}\"\n matches.append(\n RuleMatch(\n [\"Conditions\", condname] + [k],\n message.format(condname, k),\n )\n )\n\n return matches\n", "path": "src/cfnlint/rules/conditions/Configuration.py"}]} | 1,178 | 230 |
gh_patches_debug_43642 | rasdani/github-patches | git_diff | lightly-ai__lightly-701 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
_lightly_cli is missing its return statement
As a side effect, `train_model_and_embed_images` is not working as expected.
</issue>
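To make the side effect concrete, a short sketch (assuming lightly is installed and `data/` holds a small image folder) of how the missing return surfaces to callers:

```python
import lightly

# With _lightly_cli missing its return statement, this call falls through and
# returns None instead of (embeddings, labels, filenames)...
result = lightly.train_model_and_embed_images(input_dir="data/")

# ...so the documented unpacking fails.
embeddings, labels, filenames = result  # TypeError: cannot unpack non-iterable NoneType object
```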
<code>
[start of lightly/cli/embed_cli.py]
1 # -*- coding: utf-8 -*-
2 """**Lightly Embed:** Embed images with one command.
3
4 This module contains the entrypoint for the **lightly-embed**
5 command-line interface.
6 """
7
8 # Copyright (c) 2020. Lightly AG and its affiliates.
9 # All Rights Reserved
10
11 import os
12
13 import hydra
14 import torch
15 import torchvision
16 from torch.utils.hipify.hipify_python import bcolors
17
18 from lightly.data import LightlyDataset
19
20 from lightly.utils import save_embeddings
21
22 from lightly.cli._helpers import get_model_from_config
23 from lightly.cli._helpers import fix_input_path
24 from lightly.cli._helpers import cpu_count
25
26
27 def _embed_cli(cfg, is_cli_call=True):
28 input_dir = cfg['input_dir']
29 if input_dir and is_cli_call:
30 input_dir = fix_input_path(input_dir)
31
32 torch.backends.cudnn.deterministic = True
33 torch.backends.cudnn.benchmark = False
34
35 if torch.cuda.is_available():
36 device = torch.device('cuda')
37 else:
38 device = torch.device('cpu')
39
40 transform = torchvision.transforms.Compose(
41 [
42 torchvision.transforms.Resize(
43 (cfg['collate']['input_size'], cfg['collate']['input_size'])
44 ),
45 torchvision.transforms.ToTensor(),
46 torchvision.transforms.Normalize(
47 mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
48 ),
49 ]
50 )
51
52 dataset = LightlyDataset(input_dir, transform=transform)
53
54 # disable drop_last and shuffle
55 cfg['loader']['drop_last'] = False
56 cfg['loader']['shuffle'] = False
57 cfg['loader']['batch_size'] = min(cfg['loader']['batch_size'], len(dataset))
58
59 # determine the number of available cores
60 if cfg['loader']['num_workers'] < 0:
61 cfg['loader']['num_workers'] = cpu_count()
62
63 dataloader = torch.utils.data.DataLoader(dataset, **cfg['loader'])
64
65 encoder = get_model_from_config(cfg, is_cli_call)
66
67 embeddings, labels, filenames = encoder.embed(dataloader, device=device)
68
69 if is_cli_call:
70 path = os.path.join(os.getcwd(), 'embeddings.csv')
71 save_embeddings(path, embeddings, labels, filenames)
72 print(f'Embeddings are stored at {bcolors.OKBLUE}{path}{bcolors.ENDC}')
73 os.environ[
74 cfg['environment_variable_names']['lightly_last_embedding_path']
75 ] = path
76 return path
77
78 return embeddings, labels, filenames
79
80
81 @hydra.main(config_path='config', config_name='config')
82 def embed_cli(cfg):
83 """Embed images from the command-line.
84
85 Args:
86 cfg:
87 The default configs are loaded from the config file.
88 To overwrite them please see the section on the config file
89 (.config.config.yaml).
90
91 Command-Line Args:
92 input_dir:
93 Path to the input directory where images are stored.
94 checkpoint:
95 Path to the checkpoint of a pretrained model. If left
96 empty, a pretrained model by lightly is used.
97
98 Examples:
99 >>> # embed images with default settings and a lightly model
100 >>> lightly-embed input_dir=data/
101 >>>
102 >>> # embed images with default settings and a custom checkpoint
103 >>> lightly-embed input_dir=data/ checkpoint=my_checkpoint.ckpt
104 >>>
105 >>> # embed images with custom settings
106 >>> lightly-embed input_dir=data/ model.num_ftrs=32
107
108 """
109 return _embed_cli(cfg)
110
111
112 def entry():
113 embed_cli()
114
[end of lightly/cli/embed_cli.py]
[start of lightly/core.py]
1 """ Contains the core functionality of the lightly Python package. """
2
3 # Copyright (c) 2020. Lightly AG and its affiliates.
4 # All Rights Reserved
5
6 from lightly.cli.train_cli import _train_cli
7 from lightly.cli.embed_cli import _embed_cli
8 from lightly.cli.lightly_cli import _lightly_cli
9 import lightly.cli as cli
10
11 import yaml
12 import os
13
14
15 def _get_config_path(config_path):
16 """Find path to yaml config file
17
18 Args:
19 config_path: (str) Path to config.yaml file
20
21 Returns:
22 Path to config.yaml if specified else default config.yaml
23
24 Raises:
25 ValueError: If the config_path is not None but doesn't exist
26
27 """
28 if config_path is None:
29 dirname = os.path.dirname(cli.__file__)
30 config_path = os.path.join(dirname, 'config/config.yaml')
31 if not os.path.exists(config_path):
32 raise ValueError("Config path {} does not exist!".format(config_path))
33
34 return config_path
35
36
37 def _load_config_file(config_path):
38 """Load a yaml config file
39
40 Args:
41 config_path: (str) Path to config.yaml file
42
43 Returns:
44 Dictionary with configs from config.yaml
45
46 """
47 Loader = yaml.FullLoader
48 with open(config_path, 'r') as config_file:
49 cfg = yaml.load(config_file, Loader=Loader)
50
51 return cfg
52
53
54 def _add_kwargs(cfg, kwargs):
55 """Add keyword arguments to config
56
57 Args:
58 cfg: (dict) Dictionary of configs from config.yaml
59 kwargs: (dict) Dictionary of keyword arguments
60
61 Returns:
62 Union of cfg and kwargs
63
64 """
65 for key, item in kwargs.items():
66 if isinstance(item, dict):
67 if key in cfg:
68 cfg[key] = _add_kwargs(cfg[key], item)
69 else:
70 cfg[key] = item
71 else:
72 cfg[key] = item
73 return cfg
74
75
76 def train_model_and_embed_images(config_path: str = None, **kwargs):
77 """Train a self-supervised model and use it to embed images.
78
79 Calls the same function as lightly-magic. All arguments passed to
80 lightly-magic can also be passed to this function (see below for an
81 example).
82
83 Args:
84 config_path:
85 Path to config.yaml. If None, the default configs will be used.
86 **kwargs:
87 Overwrite default configs py passing keyword arguments.
88
89 Returns:
90 Embeddings, labels, and filenames of the images.
91
92 Examples:
93 >>> import lightly
94 >>>
95 >>> # train a model and embed images with default configs
96 >>> embeddings, _, _ = lightly.train_model_and_embed_images(
97 >>> input_dir='path/to/data')
98 >>>
99 >>> # train a model and embed images with separate config file
100 >>> my_config_path = 'my/config/file.yaml'
101 >>> embeddings, _, _ = lightly.train_model_and_embed_images(
102 >>> input_dir='path/to/data', config_path=my_config_path)
103 >>>
104 >>> # train a model and embed images with default settings + overwrites
105 >>> my_trainer = {max_epochs: 10}
106 >>> embeddings, _, _ = lightly.train_model_and_embed_images(
107 >>> input_dir='path/to/data', trainer=my_trainer)
108 >>> # the command above is equivalent to:
109 >>> # lightly-magic input_dir='path/to/data' trainer.max_epochs=10
110
111 """
112 config_path = _get_config_path(config_path)
113 config_args = _load_config_file(config_path)
114 config_args = _add_kwargs(config_args, kwargs)
115 return _lightly_cli(config_args, is_cli_call=False)
116
117
118 def train_embedding_model(config_path: str = None, **kwargs):
119 """Train a self-supervised model.
120
121 Calls the same function as lightly-train. All arguments passed to
122 lightly-train can also be passed to this function (see below for an
123 example).
124
125 Args:
126 config_path:
127 Path to config.yaml. If None, the default configs will be used.
128 **kwargs:
129 Overwrite default configs py passing keyword arguments.
130
131 Returns:
132 Path to checkpoint of the trained embedding model.
133
134 Examples:
135 >>> import lightly
136 >>>
137 >>> # train a model with default configs
138 >>> checkpoint_path = lightly.train_embedding_model(
139 >>> input_dir='path/to/data')
140 >>>
141 >>> # train a model with separate config file
142 >>> my_config_path = 'my/config/file.yaml'
143 >>> checkpoint_path = lightly.train_embedding_model(
144 >>> input_dir='path/to/data', config_path=my_config_path)
145 >>>
146 >>> # train a model with default settings and overwrites: large batch
147 >>> # sizes are benefitial for self-supervised training and more
148 >>> # workers speed up the dataloading process.
149 >>> my_loader = {
150 >>> batch_size: 100,
151 >>> num_workers: 8,
152 >>> }
153 >>> checkpoint_path = lightly.train_embedding_model(
154 >>> input_dir='path/to/data', loader=my_loader)
155 >>> # the command above is equivalent to:
156 >>> # lightly-train input_dir='path/to/data' loader.batch_size=100 loader.num_workers=8
157 """
158 config_path = _get_config_path(config_path)
159 config_args = _load_config_file(config_path)
160 config_args = _add_kwargs(config_args, kwargs)
161
162 return _train_cli(config_args, is_cli_call=False)
163
164
165 def embed_images(checkpoint: str, config_path: str = None, **kwargs):
166 """Embed images with a self-supervised model.
167
168 Calls the same function as lightly-embed. All arguments passed to
169 lightly-embed can also be passed to this function (see below for an
170 example).
171
172 Args:
173 checkpoint:
174 Path to the checkpoint file for the embedding model.
175 config_path:
176 Path to config.yaml. If None, the default configs will be used.
177 **kwargs:
178 Overwrite default configs py passing keyword arguments.
179
180 Returns:
181 Embeddings, labels, and filenames of the images.
182
183 Examples:
184 >>> import lightly
185 >>> my_checkpoint_path = 'path/to/checkpoint.ckpt'
186 >>>
187 >>> # embed images with default configs
188 >>> embeddings, _, _ = lightly.embed_images(
189 >>> my_checkpoint_path, input_dir='path/to/data')
190 >>>
191 >>> # embed images with separate config file
192 >>> my_config_path = 'my/config/file.yaml'
193 >>> embeddings, _, _ = lightly.embed_images(
194 >>> my_checkpoint_path, input_dir='path/to/data', config_path=my_config_path)
195 >>>
196 >>> # embed images with default settings and overwrites: at inference,
197 >>> # we can use larger input_sizes because it requires less memory.
198 >>> my_collate = {input_size: 256}
199 >>> embeddings, _, _ = lightly.embed_images(
200 >>> my_checkpoint_path, input_dir='path/to/data', collate=my_collate)
201 >>> # the command above is equivalent to:
202 >>> # lightly-embed input_dir='path/to/data' collate.input_size=256
203
204 """
205 config_path = _get_config_path(config_path)
206 config_args = _load_config_file(config_path)
207 config_args = _add_kwargs(config_args, kwargs)
208
209 config_args['checkpoint'] = checkpoint
210
211 return _embed_cli(config_args, is_cli_call=False)
212
[end of lightly/core.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lightly/cli/embed_cli.py b/lightly/cli/embed_cli.py
--- a/lightly/cli/embed_cli.py
+++ b/lightly/cli/embed_cli.py
@@ -9,8 +9,10 @@
# All Rights Reserved
import os
+from typing import Union, Tuple, List
import hydra
+import numpy as np
import torch
import torchvision
from torch.utils.hipify.hipify_python import bcolors
@@ -24,7 +26,21 @@
from lightly.cli._helpers import cpu_count
-def _embed_cli(cfg, is_cli_call=True):
+def _embed_cli(cfg, is_cli_call=True) -> \
+ Union[
+ Tuple[np.ndarray, List[int], List[str]],
+ str
+ ]:
+ """ See embed_cli() for usage documentation
+
+ is_cli_call:
+ If True:
+ Saves the embeddings as file and returns the filepath.
+ If False:
+ Returns the embeddings, labels, filenames as tuple.
+ Embeddings are of shape (n_samples, embedding_size)
+ len(labels) = len(filenames) = n_samples
+ """
input_dir = cfg['input_dir']
if input_dir and is_cli_call:
input_dir = fix_input_path(input_dir)
@@ -79,7 +95,7 @@
@hydra.main(config_path='config', config_name='config')
-def embed_cli(cfg):
+def embed_cli(cfg) -> str:
"""Embed images from the command-line.
Args:
@@ -95,6 +111,9 @@
Path to the checkpoint of a pretrained model. If left
empty, a pretrained model by lightly is used.
+ Returns:
+ The path to the created embeddings file.
+
Examples:
>>> # embed images with default settings and a lightly model
>>> lightly-embed input_dir=data/
diff --git a/lightly/core.py b/lightly/core.py
--- a/lightly/core.py
+++ b/lightly/core.py
@@ -2,6 +2,9 @@
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
+from typing import Tuple, List
+
+import numpy as np
from lightly.cli.train_cli import _train_cli
from lightly.cli.embed_cli import _embed_cli
@@ -73,12 +76,15 @@
return cfg
-def train_model_and_embed_images(config_path: str = None, **kwargs):
+def train_model_and_embed_images(config_path: str = None, **kwargs) -> Tuple[
+ np.ndarray, List[int], List[str]
+]:
"""Train a self-supervised model and use it to embed images.
- Calls the same function as lightly-magic. All arguments passed to
- lightly-magic can also be passed to this function (see below for an
- example).
+ First trains a modle using the _train_cli(),
+ then embeds with the _embed_cli().
+ All arguments passed to the CLI functions
+ can also be passed to this function (see below for an example).
Args:
config_path:
@@ -88,6 +94,8 @@
Returns:
Embeddings, labels, and filenames of the images.
+ Embeddings are of shape (n_samples, embedding_size)
+ len(labels) = len(filenames) = n_samples
Examples:
>>> import lightly
@@ -105,14 +113,16 @@
>>> my_trainer = {max_epochs: 10}
>>> embeddings, _, _ = lightly.train_model_and_embed_images(
>>> input_dir='path/to/data', trainer=my_trainer)
- >>> # the command above is equivalent to:
- >>> # lightly-magic input_dir='path/to/data' trainer.max_epochs=10
"""
config_path = _get_config_path(config_path)
config_args = _load_config_file(config_path)
config_args = _add_kwargs(config_args, kwargs)
- return _lightly_cli(config_args, is_cli_call=False)
+
+ checkpoint = _train_cli(config_args, is_cli_call=False)
+ config_args['checkpoint'] = checkpoint
+ embeddings, labels, filenames = _embed_cli(config_args, is_cli_call=False)
+ return embeddings, labels, filenames
def train_embedding_model(config_path: str = None, **kwargs):
| {"golden_diff": "diff --git a/lightly/cli/embed_cli.py b/lightly/cli/embed_cli.py\n--- a/lightly/cli/embed_cli.py\n+++ b/lightly/cli/embed_cli.py\n@@ -9,8 +9,10 @@\n # All Rights Reserved\n \n import os\n+from typing import Union, Tuple, List\n \n import hydra\n+import numpy as np\n import torch\n import torchvision\n from torch.utils.hipify.hipify_python import bcolors\n@@ -24,7 +26,21 @@\n from lightly.cli._helpers import cpu_count\n \n \n-def _embed_cli(cfg, is_cli_call=True):\n+def _embed_cli(cfg, is_cli_call=True) -> \\\n+ Union[\n+ Tuple[np.ndarray, List[int], List[str]],\n+ str\n+ ]:\n+ \"\"\" See embed_cli() for usage documentation\n+\n+ is_cli_call:\n+ If True:\n+ Saves the embeddings as file and returns the filepath.\n+ If False:\n+ Returns the embeddings, labels, filenames as tuple.\n+ Embeddings are of shape (n_samples, embedding_size)\n+ len(labels) = len(filenames) = n_samples\n+ \"\"\"\n input_dir = cfg['input_dir']\n if input_dir and is_cli_call:\n input_dir = fix_input_path(input_dir)\n@@ -79,7 +95,7 @@\n \n \n @hydra.main(config_path='config', config_name='config')\n-def embed_cli(cfg):\n+def embed_cli(cfg) -> str:\n \"\"\"Embed images from the command-line.\n \n Args:\n@@ -95,6 +111,9 @@\n Path to the checkpoint of a pretrained model. If left\n empty, a pretrained model by lightly is used.\n \n+ Returns:\n+ The path to the created embeddings file.\n+\n Examples:\n >>> #\u00a0embed images with default settings and a lightly model\n >>> lightly-embed input_dir=data/\ndiff --git a/lightly/core.py b/lightly/core.py\n--- a/lightly/core.py\n+++ b/lightly/core.py\n@@ -2,6 +2,9 @@\n \n # Copyright (c) 2020. Lightly AG and its affiliates.\n # All Rights Reserved\n+from typing import Tuple, List\n+\n+import numpy as np\n \n from lightly.cli.train_cli import _train_cli\n from lightly.cli.embed_cli import _embed_cli\n@@ -73,12 +76,15 @@\n return cfg\n \n \n-def train_model_and_embed_images(config_path: str = None, **kwargs):\n+def train_model_and_embed_images(config_path: str = None, **kwargs) -> Tuple[\n+ np.ndarray, List[int], List[str]\n+]:\n \"\"\"Train a self-supervised model and use it to embed images.\n \n- Calls the same function as lightly-magic. 
All arguments passed to\n- lightly-magic can also be passed to this function (see below for an\n- example).\n+ First trains a modle using the _train_cli(),\n+ then embeds with the _embed_cli().\n+ All arguments passed to the CLI functions\n+ can also be passed to this function (see below for an example).\n \n Args:\n config_path:\n@@ -88,6 +94,8 @@\n \n Returns:\n Embeddings, labels, and filenames of the images.\n+ Embeddings are of shape (n_samples, embedding_size)\n+ len(labels) = len(filenames) = n_samples\n \n Examples:\n >>> import lightly\n@@ -105,14 +113,16 @@\n >>> my_trainer = {max_epochs: 10}\n >>> embeddings, _, _ = lightly.train_model_and_embed_images(\n >>> input_dir='path/to/data', trainer=my_trainer)\n- >>> #\u00a0the command above is equivalent to:\n- >>> #\u00a0lightly-magic input_dir='path/to/data' trainer.max_epochs=10\n \n \"\"\"\n config_path = _get_config_path(config_path)\n config_args = _load_config_file(config_path)\n config_args = _add_kwargs(config_args, kwargs)\n- return _lightly_cli(config_args, is_cli_call=False)\n+\n+ checkpoint = _train_cli(config_args, is_cli_call=False)\n+ config_args['checkpoint'] = checkpoint\n+ embeddings, labels, filenames = _embed_cli(config_args, is_cli_call=False)\n+ return embeddings, labels, filenames\n \n \n def train_embedding_model(config_path: str = None, **kwargs):\n", "issue": "_lightly_cli is missing it's return statement\nas a side effect `train_model_and_embed_images` is not working as expected.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"**Lightly Embed:** Embed images with one command.\n\nThis module contains the entrypoint for the **lightly-embed**\ncommand-line interface.\n\"\"\"\n\n# Copyright (c) 2020. Lightly AG and its affiliates.\n# All Rights Reserved\n\nimport os\n\nimport hydra\nimport torch\nimport torchvision\nfrom torch.utils.hipify.hipify_python import bcolors\n\nfrom lightly.data import LightlyDataset\n\nfrom lightly.utils import save_embeddings\n\nfrom lightly.cli._helpers import get_model_from_config\nfrom lightly.cli._helpers import fix_input_path\nfrom lightly.cli._helpers import cpu_count\n\n\ndef _embed_cli(cfg, is_cli_call=True):\n input_dir = cfg['input_dir']\n if input_dir and is_cli_call:\n input_dir = fix_input_path(input_dir)\n\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n if torch.cuda.is_available():\n device = torch.device('cuda')\n else:\n device = torch.device('cpu')\n\n transform = torchvision.transforms.Compose(\n [\n torchvision.transforms.Resize(\n (cfg['collate']['input_size'], cfg['collate']['input_size'])\n ),\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]\n ),\n ]\n )\n\n dataset = LightlyDataset(input_dir, transform=transform)\n\n # disable drop_last and shuffle\n cfg['loader']['drop_last'] = False\n cfg['loader']['shuffle'] = False\n cfg['loader']['batch_size'] = min(cfg['loader']['batch_size'], len(dataset))\n\n # determine the number of available cores\n if cfg['loader']['num_workers'] < 0:\n cfg['loader']['num_workers'] = cpu_count()\n\n dataloader = torch.utils.data.DataLoader(dataset, **cfg['loader'])\n\n encoder = get_model_from_config(cfg, is_cli_call)\n\n embeddings, labels, filenames = encoder.embed(dataloader, device=device)\n\n if is_cli_call:\n path = os.path.join(os.getcwd(), 'embeddings.csv')\n save_embeddings(path, embeddings, labels, filenames)\n print(f'Embeddings are stored at {bcolors.OKBLUE}{path}{bcolors.ENDC}')\n 
os.environ[\n cfg['environment_variable_names']['lightly_last_embedding_path']\n ] = path\n return path\n\n return embeddings, labels, filenames\n\n\[email protected](config_path='config', config_name='config')\ndef embed_cli(cfg):\n \"\"\"Embed images from the command-line.\n\n Args:\n cfg:\n The default configs are loaded from the config file.\n To overwrite them please see the section on the config file\n (.config.config.yaml).\n\n Command-Line Args:\n input_dir:\n Path to the input directory where images are stored.\n checkpoint:\n Path to the checkpoint of a pretrained model. If left\n empty, a pretrained model by lightly is used.\n\n Examples:\n >>> #\u00a0embed images with default settings and a lightly model\n >>> lightly-embed input_dir=data/\n >>>\n >>> # embed images with default settings and a custom checkpoint\n >>> lightly-embed input_dir=data/ checkpoint=my_checkpoint.ckpt\n >>>\n >>> # embed images with custom settings\n >>> lightly-embed input_dir=data/ model.num_ftrs=32\n\n \"\"\"\n return _embed_cli(cfg)\n\n\ndef entry():\n embed_cli()\n", "path": "lightly/cli/embed_cli.py"}, {"content": "\"\"\" Contains the core functionality of the lightly Python package. \"\"\"\n\n# Copyright (c) 2020. Lightly AG and its affiliates.\n# All Rights Reserved\n\nfrom lightly.cli.train_cli import _train_cli\nfrom lightly.cli.embed_cli import _embed_cli\nfrom lightly.cli.lightly_cli import _lightly_cli\nimport lightly.cli as cli\n\nimport yaml\nimport os\n\n\ndef _get_config_path(config_path):\n \"\"\"Find path to yaml config file\n\n Args:\n config_path: (str) Path to config.yaml file\n\n Returns:\n Path to config.yaml if specified else default config.yaml\n\n Raises:\n ValueError: If the config_path is not None but doesn't exist\n\n \"\"\"\n if config_path is None:\n dirname = os.path.dirname(cli.__file__)\n config_path = os.path.join(dirname, 'config/config.yaml')\n if not os.path.exists(config_path):\n raise ValueError(\"Config path {} does not exist!\".format(config_path))\n\n return config_path\n\n\ndef _load_config_file(config_path):\n \"\"\"Load a yaml config file\n\n Args:\n config_path: (str) Path to config.yaml file\n\n Returns:\n Dictionary with configs from config.yaml\n\n \"\"\"\n Loader = yaml.FullLoader\n with open(config_path, 'r') as config_file:\n cfg = yaml.load(config_file, Loader=Loader)\n\n return cfg\n\n\ndef _add_kwargs(cfg, kwargs):\n \"\"\"Add keyword arguments to config\n\n Args:\n cfg: (dict) Dictionary of configs from config.yaml\n kwargs: (dict) Dictionary of keyword arguments\n\n Returns:\n Union of cfg and kwargs\n\n \"\"\"\n for key, item in kwargs.items():\n if isinstance(item, dict):\n if key in cfg:\n cfg[key] = _add_kwargs(cfg[key], item)\n else:\n cfg[key] = item\n else:\n cfg[key] = item\n return cfg\n\n\ndef train_model_and_embed_images(config_path: str = None, **kwargs):\n \"\"\"Train a self-supervised model and use it to embed images.\n\n Calls the same function as lightly-magic. All arguments passed to\n lightly-magic can also be passed to this function (see below for an\n example).\n\n Args:\n config_path:\n Path to config.yaml. 
If None, the default configs will be used.\n **kwargs:\n Overwrite default configs py passing keyword arguments.\n\n Returns:\n Embeddings, labels, and filenames of the images.\n\n Examples:\n >>> import lightly\n >>>\n >>> # train a model and embed images with default configs\n >>> embeddings, _, _ = lightly.train_model_and_embed_images(\n >>> input_dir='path/to/data')\n >>>\n >>> #\u00a0train a model and embed images with separate config file\n >>> my_config_path = 'my/config/file.yaml'\n >>> embeddings, _, _ = lightly.train_model_and_embed_images(\n >>> input_dir='path/to/data', config_path=my_config_path)\n >>>\n >>> # train a model and embed images with default settings + overwrites\n >>> my_trainer = {max_epochs: 10}\n >>> embeddings, _, _ = lightly.train_model_and_embed_images(\n >>> input_dir='path/to/data', trainer=my_trainer)\n >>> #\u00a0the command above is equivalent to:\n >>> #\u00a0lightly-magic input_dir='path/to/data' trainer.max_epochs=10\n\n \"\"\"\n config_path = _get_config_path(config_path)\n config_args = _load_config_file(config_path)\n config_args = _add_kwargs(config_args, kwargs)\n return _lightly_cli(config_args, is_cli_call=False)\n\n\ndef train_embedding_model(config_path: str = None, **kwargs):\n \"\"\"Train a self-supervised model.\n\n Calls the same function as lightly-train. All arguments passed to\n lightly-train can also be passed to this function (see below for an\n example).\n\n Args:\n config_path:\n Path to config.yaml. If None, the default configs will be used.\n **kwargs:\n Overwrite default configs py passing keyword arguments.\n\n Returns:\n Path to checkpoint of the trained embedding model.\n\n Examples:\n >>> import lightly\n >>>\n >>> # train a model with default configs\n >>> checkpoint_path = lightly.train_embedding_model(\n >>> input_dir='path/to/data')\n >>>\n >>> #\u00a0train a model with separate config file\n >>> my_config_path = 'my/config/file.yaml'\n >>> checkpoint_path = lightly.train_embedding_model(\n >>> input_dir='path/to/data', config_path=my_config_path)\n >>>\n >>> # train a model with default settings and overwrites: large batch\n >>> # sizes are benefitial for self-supervised training and more \n >>> #\u00a0workers speed up the dataloading process.\n >>> my_loader = {\n >>> batch_size: 100,\n >>> num_workers: 8,\n >>> }\n >>> checkpoint_path = lightly.train_embedding_model(\n >>> input_dir='path/to/data', loader=my_loader)\n >>> #\u00a0the command above is equivalent to:\n >>> #\u00a0lightly-train input_dir='path/to/data' loader.batch_size=100 loader.num_workers=8\n \"\"\"\n config_path = _get_config_path(config_path)\n config_args = _load_config_file(config_path)\n config_args = _add_kwargs(config_args, kwargs)\n\n return _train_cli(config_args, is_cli_call=False)\n\n\ndef embed_images(checkpoint: str, config_path: str = None, **kwargs):\n \"\"\"Embed images with a self-supervised model.\n\n Calls the same function as lightly-embed. All arguments passed to\n lightly-embed can also be passed to this function (see below for an\n example).\n\n Args:\n checkpoint:\n Path to the checkpoint file for the embedding model.\n config_path:\n Path to config.yaml. 
If None, the default configs will be used.\n **kwargs:\n Overwrite default configs py passing keyword arguments.\n\n Returns:\n Embeddings, labels, and filenames of the images.\n\n Examples:\n >>> import lightly\n >>> my_checkpoint_path = 'path/to/checkpoint.ckpt'\n >>>\n >>> # embed images with default configs\n >>> embeddings, _, _ = lightly.embed_images(\n >>> my_checkpoint_path, input_dir='path/to/data')\n >>>\n >>> #\u00a0embed images with separate config file\n >>> my_config_path = 'my/config/file.yaml'\n >>> embeddings, _, _ = lightly.embed_images(\n >>> my_checkpoint_path, input_dir='path/to/data', config_path=my_config_path)\n >>>\n >>> # embed images with default settings and overwrites: at inference,\n >>> #\u00a0we can use larger input_sizes because it requires less memory.\n >>> my_collate = {input_size: 256}\n >>> embeddings, _, _ = lightly.embed_images(\n >>> my_checkpoint_path, input_dir='path/to/data', collate=my_collate)\n >>> #\u00a0the command above is equivalent to:\n >>> #\u00a0lightly-embed input_dir='path/to/data' collate.input_size=256\n\n \"\"\"\n config_path = _get_config_path(config_path)\n config_args = _load_config_file(config_path)\n config_args = _add_kwargs(config_args, kwargs)\n\n config_args['checkpoint'] = checkpoint\n\n return _embed_cli(config_args, is_cli_call=False)\n", "path": "lightly/core.py"}]} | 3,743 | 964 |
gh_patches_debug_632 | rasdani/github-patches | git_diff | pex-tool__pex-2245 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.147
On the docket:
+ [x] pex does not use .pip/pip.conf to resolve packages #336 / #838
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.146"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.146"
+__version__ = "2.1.147"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.146\"\n+__version__ = \"2.1.147\"\n", "issue": "Release 2.1.147\nOn the docket:\r\n+ [x] pex does not use .pip/pip.conf to resolve packages #336 / #838\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.146\"\n", "path": "pex/version.py"}]} | 626 | 98 |
gh_patches_debug_2936 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-2387 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Internal Server Errors (e.g. on delete of user)
**Describe the bug**
Internal server error for some actions. I have set up a dockerless installation and am able to access the application and the admin pages. However, some actions produce errors. For example: 
**To Reproduce**
Steps to reproduce the behavior:
1. Click delete user after providing the admin password. The browser shows an internal server error. The error in the application log is:
```
Internal Server Error: /settings/reports/2/delete
Traceback (most recent call last):
File "/opt/bookwyrm/venv/lib/python3.10/site-packages/django/core/handlers/exception.py", line 47, in inner
response = get_response(request)
File "/opt/bookwyrm/venv/lib/python3.10/site-packages/django/core/handlers/base.py", line 204, in _get_response
response = response.render()
File "/opt/bookwyrm/venv/lib/python3.10/site-packages/django/template/response.py", line 105, in render
self.content = self.rendered_content
File "/opt/bookwyrm/venv/lib/python3.10/site-packages/django/template/response.py", line 81, in rendered_content
template = self.resolve_template(self.template_name)
File "/opt/bookwyrm/venv/lib/python3.10/site-packages/django/template/response.py", line 65, in resolve_template
return get_template(template, using=self.using)
File "/opt/bookwyrm/venv/lib/python3.10/site-packages/django/template/loader.py", line 19, in get_template
raise TemplateDoesNotExist(template_name, chain=chain)
django.template.exceptions.TemplateDoesNotExist: user_admin/user.html
```
</issue>
<code>
[start of bookwyrm/views/admin/reports.py]
1 """ moderation via flagged posts and users """
2 from django.contrib.auth.decorators import login_required, permission_required
3 from django.core.paginator import Paginator
4 from django.core.exceptions import PermissionDenied
5 from django.shortcuts import get_object_or_404, redirect
6 from django.template.response import TemplateResponse
7 from django.utils.decorators import method_decorator
8 from django.views import View
9
10 from bookwyrm import forms, models
11 from bookwyrm.settings import PAGE_LENGTH
12
13
14 # pylint: disable=no-self-use
15 @method_decorator(login_required, name="dispatch")
16 @method_decorator(
17 permission_required("bookwyrm.moderate_user", raise_exception=True),
18 name="dispatch",
19 )
20 @method_decorator(
21 permission_required("bookwyrm.moderate_post", raise_exception=True),
22 name="dispatch",
23 )
24 class ReportsAdmin(View):
25 """list of reports"""
26
27 def get(self, request):
28 """view current reports"""
29 filters = {}
30
31 resolved = request.GET.get("resolved") == "true"
32 server = request.GET.get("server")
33 if server:
34 filters["user__federated_server__server_name"] = server
35 username = request.GET.get("username")
36 if username:
37 filters["user__username__icontains"] = username
38 filters["resolved"] = resolved
39
40 reports = models.Report.objects.filter(**filters)
41 paginated = Paginator(reports, PAGE_LENGTH)
42 page = paginated.get_page(request.GET.get("page"))
43 data = {
44 "resolved": resolved,
45 "server": server,
46 "reports": page,
47 "page_range": paginated.get_elided_page_range(
48 page.number, on_each_side=2, on_ends=1
49 ),
50 }
51 return TemplateResponse(request, "settings/reports/reports.html", data)
52
53
54 @method_decorator(login_required, name="dispatch")
55 @method_decorator(
56 permission_required("bookwyrm.moderate_user", raise_exception=True),
57 name="dispatch",
58 )
59 @method_decorator(
60 permission_required("bookwyrm.moderate_post", raise_exception=True),
61 name="dispatch",
62 )
63 class ReportAdmin(View):
64 """view a specific report"""
65
66 def get(self, request, report_id):
67 """load a report"""
68 data = {
69 "report": get_object_or_404(models.Report, id=report_id),
70 "group_form": forms.UserGroupForm(),
71 }
72 return TemplateResponse(request, "settings/reports/report.html", data)
73
74 def post(self, request, report_id):
75 """comment on a report"""
76 report = get_object_or_404(models.Report, id=report_id)
77 models.ReportComment.objects.create(
78 user=request.user,
79 report=report,
80 note=request.POST.get("note"),
81 )
82 return redirect("settings-report", report.id)
83
84
85 @login_required
86 @permission_required("bookwyrm.moderate_user")
87 def suspend_user(_, user_id):
88 """mark an account as inactive"""
89 user = get_object_or_404(models.User, id=user_id)
90 user.is_active = False
91 user.deactivation_reason = "moderator_suspension"
92 # this isn't a full deletion, so we don't want to tell the world
93 user.save(broadcast=False)
94 return redirect("settings-user", user.id)
95
96
97 @login_required
98 @permission_required("bookwyrm.moderate_user")
99 def unsuspend_user(_, user_id):
100 """mark an account as inactive"""
101 user = get_object_or_404(models.User, id=user_id)
102 user.is_active = True
103 user.deactivation_reason = None
104 # this isn't a full deletion, so we don't want to tell the world
105 user.save(broadcast=False)
106 return redirect("settings-user", user.id)
107
108
109 @login_required
110 @permission_required("bookwyrm.moderate_user")
111 def moderator_delete_user(request, user_id):
112 """permanently delete a user"""
113 user = get_object_or_404(models.User, id=user_id)
114
115 # we can't delete users on other instances
116 if not user.local:
117 raise PermissionDenied()
118
119 form = forms.DeleteUserForm(request.POST, instance=user)
120
121 moderator = models.User.objects.get(id=request.user.id)
122 # check the moderator's password
123 if form.is_valid() and moderator.check_password(form.cleaned_data["password"]):
124 user.deactivation_reason = "moderator_deletion"
125 user.delete()
126 return redirect("settings-user", user.id)
127
128 form.errors["password"] = ["Invalid password"]
129
130 data = {"user": user, "group_form": forms.UserGroupForm(), "form": form}
131 return TemplateResponse(request, "user_admin/user.html", data)
132
133
134 @login_required
135 @permission_required("bookwyrm.moderate_post")
136 def resolve_report(_, report_id):
137 """mark a report as (un)resolved"""
138 report = get_object_or_404(models.Report, id=report_id)
139 report.resolved = not report.resolved
140 report.save()
141 if not report.resolved:
142 return redirect("settings-report", report.id)
143 return redirect("settings-reports")
144
[end of bookwyrm/views/admin/reports.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bookwyrm/views/admin/reports.py b/bookwyrm/views/admin/reports.py
--- a/bookwyrm/views/admin/reports.py
+++ b/bookwyrm/views/admin/reports.py
@@ -128,7 +128,7 @@
form.errors["password"] = ["Invalid password"]
data = {"user": user, "group_form": forms.UserGroupForm(), "form": form}
- return TemplateResponse(request, "user_admin/user.html", data)
+ return TemplateResponse(request, "settings/users/user.html", data)
@login_required
| {"golden_diff": "diff --git a/bookwyrm/views/admin/reports.py b/bookwyrm/views/admin/reports.py\n--- a/bookwyrm/views/admin/reports.py\n+++ b/bookwyrm/views/admin/reports.py\n@@ -128,7 +128,7 @@\n form.errors[\"password\"] = [\"Invalid password\"]\n \n data = {\"user\": user, \"group_form\": forms.UserGroupForm(), \"form\": form}\n- return TemplateResponse(request, \"user_admin/user.html\", data)\n+ return TemplateResponse(request, \"settings/users/user.html\", data)\n \n \n @login_required\n", "issue": "Internal Server Errors (e.g. on delete of user)\n**Describe the bug**\r\nInternal server error for some actions. I have set up a dockerless installation and am able to access the application and the admin pages. However, some actions create errors. For example: \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Clicking delete user after providing admin password. Browser shows internal server error. Error in application is:\r\n\r\n```\r\nInternal Server Error: /settings/reports/2/delete\r\nTraceback (most recent call last):\r\n File \"/opt/bookwyrm/venv/lib/python3.10/site-packages/django/core/handlers/exception.py\", line 47, in inner\r\n response = get_response(request)\r\n File \"/opt/bookwyrm/venv/lib/python3.10/site-packages/django/core/handlers/base.py\", line 204, in _get_response\r\n response = response.render()\r\n File \"/opt/bookwyrm/venv/lib/python3.10/site-packages/django/template/response.py\", line 105, in render\r\n self.content = self.rendered_content\r\n File \"/opt/bookwyrm/venv/lib/python3.10/site-packages/django/template/response.py\", line 81, in rendered_content\r\n template = self.resolve_template(self.template_name)\r\n File \"/opt/bookwyrm/venv/lib/python3.10/site-packages/django/template/response.py\", line 65, in resolve_template\r\n return get_template(template, using=self.using)\r\n File \"/opt/bookwyrm/venv/lib/python3.10/site-packages/django/template/loader.py\", line 19, in get_template\r\n raise TemplateDoesNotExist(template_name, chain=chain)\r\ndjango.template.exceptions.TemplateDoesNotExist: user_admin/user.html\r\n```\r\n\r\n\n", "before_files": [{"content": "\"\"\" moderation via flagged posts and users \"\"\"\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.core.paginator import Paginator\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.settings import PAGE_LENGTH\n\n\n# pylint: disable=no-self-use\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.moderate_user\", raise_exception=True),\n name=\"dispatch\",\n)\n@method_decorator(\n permission_required(\"bookwyrm.moderate_post\", raise_exception=True),\n name=\"dispatch\",\n)\nclass ReportsAdmin(View):\n \"\"\"list of reports\"\"\"\n\n def get(self, request):\n \"\"\"view current reports\"\"\"\n filters = {}\n\n resolved = request.GET.get(\"resolved\") == \"true\"\n server = request.GET.get(\"server\")\n if server:\n filters[\"user__federated_server__server_name\"] = server\n username = request.GET.get(\"username\")\n if username:\n filters[\"user__username__icontains\"] = username\n filters[\"resolved\"] = resolved\n\n reports = models.Report.objects.filter(**filters)\n paginated = Paginator(reports, PAGE_LENGTH)\n page = 
paginated.get_page(request.GET.get(\"page\"))\n data = {\n \"resolved\": resolved,\n \"server\": server,\n \"reports\": page,\n \"page_range\": paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n ),\n }\n return TemplateResponse(request, \"settings/reports/reports.html\", data)\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.moderate_user\", raise_exception=True),\n name=\"dispatch\",\n)\n@method_decorator(\n permission_required(\"bookwyrm.moderate_post\", raise_exception=True),\n name=\"dispatch\",\n)\nclass ReportAdmin(View):\n \"\"\"view a specific report\"\"\"\n\n def get(self, request, report_id):\n \"\"\"load a report\"\"\"\n data = {\n \"report\": get_object_or_404(models.Report, id=report_id),\n \"group_form\": forms.UserGroupForm(),\n }\n return TemplateResponse(request, \"settings/reports/report.html\", data)\n\n def post(self, request, report_id):\n \"\"\"comment on a report\"\"\"\n report = get_object_or_404(models.Report, id=report_id)\n models.ReportComment.objects.create(\n user=request.user,\n report=report,\n note=request.POST.get(\"note\"),\n )\n return redirect(\"settings-report\", report.id)\n\n\n@login_required\n@permission_required(\"bookwyrm.moderate_user\")\ndef suspend_user(_, user_id):\n \"\"\"mark an account as inactive\"\"\"\n user = get_object_or_404(models.User, id=user_id)\n user.is_active = False\n user.deactivation_reason = \"moderator_suspension\"\n # this isn't a full deletion, so we don't want to tell the world\n user.save(broadcast=False)\n return redirect(\"settings-user\", user.id)\n\n\n@login_required\n@permission_required(\"bookwyrm.moderate_user\")\ndef unsuspend_user(_, user_id):\n \"\"\"mark an account as inactive\"\"\"\n user = get_object_or_404(models.User, id=user_id)\n user.is_active = True\n user.deactivation_reason = None\n # this isn't a full deletion, so we don't want to tell the world\n user.save(broadcast=False)\n return redirect(\"settings-user\", user.id)\n\n\n@login_required\n@permission_required(\"bookwyrm.moderate_user\")\ndef moderator_delete_user(request, user_id):\n \"\"\"permanently delete a user\"\"\"\n user = get_object_or_404(models.User, id=user_id)\n\n # we can't delete users on other instances\n if not user.local:\n raise PermissionDenied()\n\n form = forms.DeleteUserForm(request.POST, instance=user)\n\n moderator = models.User.objects.get(id=request.user.id)\n # check the moderator's password\n if form.is_valid() and moderator.check_password(form.cleaned_data[\"password\"]):\n user.deactivation_reason = \"moderator_deletion\"\n user.delete()\n return redirect(\"settings-user\", user.id)\n\n form.errors[\"password\"] = [\"Invalid password\"]\n\n data = {\"user\": user, \"group_form\": forms.UserGroupForm(), \"form\": form}\n return TemplateResponse(request, \"user_admin/user.html\", data)\n\n\n@login_required\n@permission_required(\"bookwyrm.moderate_post\")\ndef resolve_report(_, report_id):\n \"\"\"mark a report as (un)resolved\"\"\"\n report = get_object_or_404(models.Report, id=report_id)\n report.resolved = not report.resolved\n report.save()\n if not report.resolved:\n return redirect(\"settings-report\", report.id)\n return redirect(\"settings-reports\")\n", "path": "bookwyrm/views/admin/reports.py"}]} | 2,346 | 127 |
gh_patches_debug_12622 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-4754 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BigQuery: SchemaField cannot load resource with missing `mode` property
The [mode property](https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#schema.fields.mode) is optional in the REST API, but this code fails:
```
SchemaField.from_api_repr({'name': 'colname', 'type': 'INT64'})
```
I believe this is due to the line
```
mode=api_repr['mode'].upper(),
```
which does not properly handle the case where the `mode` property is missing.
Related to https://github.com/GoogleCloudPlatform/google-cloud-python/issues/3723 as it is often convenient to omit the mode parameter when defining a schema file by hand.
</issue>
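A quick sketch of the same payload handled tolerantly; defaulting the optional key is one possible fix, shown here outside the library for illustration:

```python
api_repr = {'name': 'colname', 'type': 'INT64'}

# Current behaviour: direct indexing raises KeyError because 'mode' is optional.
# api_repr['mode'].upper()  # KeyError: 'mode'

# Tolerant reading of the same resource, defaulting to the API's NULLABLE default:
mode = api_repr.get('mode', 'NULLABLE').upper()
fields = api_repr.get('fields', ())
print(mode, fields)  # NULLABLE ()
```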
<code>
[start of bigquery/google/cloud/bigquery/schema.py]
1 # Copyright 2015 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Schemas for BigQuery tables / queries."""
16
17
18 class SchemaField(object):
19 """Describe a single field within a table schema.
20
21 :type name: str
22 :param name: the name of the field.
23
24 :type field_type: str
25 :param field_type: the type of the field (one of 'STRING', 'INTEGER',
26 'FLOAT', 'BOOLEAN', 'TIMESTAMP' or 'RECORD').
27
28 :type mode: str
29 :param mode: the mode of the field (one of 'NULLABLE', 'REQUIRED',
30 or 'REPEATED').
31
32 :type description: str
33 :param description: optional description for the field.
34
35 :type fields: tuple of :class:`~google.cloud.bigquery.schema.SchemaField`
36 :param fields: subfields (requires ``field_type`` of 'RECORD').
37 """
38 def __init__(self, name, field_type, mode='NULLABLE',
39 description=None, fields=()):
40 self._name = name
41 self._field_type = field_type
42 self._mode = mode
43 self._description = description
44 self._fields = tuple(fields)
45
46 @classmethod
47 def from_api_repr(cls, api_repr):
48 """Return a ``SchemaField`` object deserialized from a dictionary.
49
50 Args:
51 api_repr (Mapping[str, str]): The serialized representation
52 of the SchemaField, such as what is output by
53 :meth:`to_api_repr`.
54
55 Returns:
56 google.cloud.biquery.schema.SchemaField:
57 The ``SchemaField`` object.
58 """
59 return cls(
60 field_type=api_repr['type'].upper(),
61 fields=[cls.from_api_repr(f) for f in api_repr.get('fields', ())],
62 mode=api_repr['mode'].upper(),
63 name=api_repr['name'],
64 )
65
66 @property
67 def name(self):
68 """str: The name of the field."""
69 return self._name
70
71 @property
72 def field_type(self):
73 """str: The type of the field.
74
75 Will be one of 'STRING', 'INTEGER', 'FLOAT', 'BOOLEAN',
76 'TIMESTAMP' or 'RECORD'.
77 """
78 return self._field_type
79
80 @property
81 def mode(self):
82 """str: The mode of the field.
83
84 Will be one of 'NULLABLE', 'REQUIRED', or 'REPEATED'.
85 """
86 return self._mode
87
88 @property
89 def is_nullable(self):
90 """Check whether 'mode' is 'nullable'."""
91 return self._mode == 'NULLABLE'
92
93 @property
94 def description(self):
95 """Optional[str]: Description for the field."""
96 return self._description
97
98 @property
99 def fields(self):
100 """tuple: Subfields contained in this field.
101
102 If ``field_type`` is not 'RECORD', this property must be
103 empty / unset.
104 """
105 return self._fields
106
107 def to_api_repr(self):
108 """Return a dictionary representing this schema field.
109
110 Returns:
111 dict: A dictionary representing the SchemaField in a serialized
112 form.
113 """
114 # Put together the basic representation. See http://bit.ly/2hOAT5u.
115 answer = {
116 'mode': self.mode.lower(),
117 'name': self.name,
118 'type': self.field_type.lower(),
119 }
120
121 # If this is a RECORD type, then sub-fields are also included,
122 # add this to the serialized representation.
123 if self.field_type.upper() == 'RECORD':
124 answer['fields'] = [f.to_api_repr() for f in self.fields]
125
126 # Done; return the serialized dictionary.
127 return answer
128
129 def _key(self):
130 """A tuple key that uniquely describes this field.
131
132 Used to compute this instance's hashcode and evaluate equality.
133
134 Returns:
135 tuple: The contents of this
136 :class:`~google.cloud.bigquery.schema.SchemaField`.
137 """
138 return (
139 self._name,
140 self._field_type.lower(),
141 self._mode,
142 self._description,
143 self._fields,
144 )
145
146 def __eq__(self, other):
147 if not isinstance(other, SchemaField):
148 return NotImplemented
149 return self._key() == other._key()
150
151 def __ne__(self, other):
152 return not self == other
153
154 def __hash__(self):
155 return hash(self._key())
156
157 def __repr__(self):
158 return 'SchemaField{}'.format(self._key())
159
160
161 def _parse_schema_resource(info):
162 """Parse a resource fragment into a schema field.
163
164 :type info: mapping
165 :param info: should contain a "fields" key to be parsed
166
167 :rtype:
168 list of :class:`google.cloud.bigquery.schema.SchemaField`, or
169 ``NoneType``
170 :returns: a list of parsed fields, or ``None`` if no "fields" key is
171 present in ``info``.
172 """
173 if 'fields' not in info:
174 return ()
175
176 schema = []
177 for r_field in info['fields']:
178 name = r_field['name']
179 field_type = r_field['type']
180 mode = r_field.get('mode', 'NULLABLE')
181 description = r_field.get('description')
182 sub_fields = _parse_schema_resource(r_field)
183 schema.append(
184 SchemaField(name, field_type, mode, description, sub_fields))
185 return schema
186
187
188 def _build_schema_resource(fields):
189 """Generate a resource fragment for a schema.
190
191 :type fields:
192 sequence of :class:`~google.cloud.bigquery.schema.SchemaField`
193 :param fields: schema to be dumped
194
195 :rtype: mapping
196 :returns: a mapping describing the schema of the supplied fields.
197 """
198 infos = []
199 for field in fields:
200 info = {'name': field.name,
201 'type': field.field_type,
202 'mode': field.mode}
203 if field.description is not None:
204 info['description'] = field.description
205 if field.fields:
206 info['fields'] = _build_schema_resource(field.fields)
207 infos.append(info)
208 return infos
209
[end of bigquery/google/cloud/bigquery/schema.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bigquery/google/cloud/bigquery/schema.py b/bigquery/google/cloud/bigquery/schema.py
--- a/bigquery/google/cloud/bigquery/schema.py
+++ b/bigquery/google/cloud/bigquery/schema.py
@@ -56,10 +56,13 @@
google.cloud.biquery.schema.SchemaField:
The ``SchemaField`` object.
"""
+ # Handle optional properties with default values
+ mode = api_repr.get('mode', 'NULLABLE')
+ fields = api_repr.get('fields', ())
return cls(
field_type=api_repr['type'].upper(),
- fields=[cls.from_api_repr(f) for f in api_repr.get('fields', ())],
- mode=api_repr['mode'].upper(),
+ fields=[cls.from_api_repr(f) for f in fields],
+ mode=mode.upper(),
name=api_repr['name'],
)
| {"golden_diff": "diff --git a/bigquery/google/cloud/bigquery/schema.py b/bigquery/google/cloud/bigquery/schema.py\n--- a/bigquery/google/cloud/bigquery/schema.py\n+++ b/bigquery/google/cloud/bigquery/schema.py\n@@ -56,10 +56,13 @@\n google.cloud.biquery.schema.SchemaField:\n The ``SchemaField`` object.\n \"\"\"\n+ # Handle optional properties with default values\n+ mode = api_repr.get('mode', 'NULLABLE')\n+ fields = api_repr.get('fields', ())\n return cls(\n field_type=api_repr['type'].upper(),\n- fields=[cls.from_api_repr(f) for f in api_repr.get('fields', ())],\n- mode=api_repr['mode'].upper(),\n+ fields=[cls.from_api_repr(f) for f in fields],\n+ mode=mode.upper(),\n name=api_repr['name'],\n )\n", "issue": "BigQuery: SchemaField cannot load resource with missing `mode` property\nThe [mode property](https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#schema.fields.mode) is optional in the REST API, but this code fails:\r\n\r\n```\r\nSchemaField.from_api_repr({'name': 'colname', 'type': 'INT64'})\r\n```\r\n\r\nI believe this is due to the line\r\n\r\n```\r\nmode=api_repr['mode'].upper(),\r\n```\r\n\r\nwhich does not properly handle when the `mode` property is missing.\r\n\r\nRelated to https://github.com/GoogleCloudPlatform/google-cloud-python/issues/3723 as it is often convenient to omit the mode parameter when defining a schema file by hand.\r\n\n", "before_files": [{"content": "# Copyright 2015 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Schemas for BigQuery tables / queries.\"\"\"\n\n\nclass SchemaField(object):\n \"\"\"Describe a single field within a table schema.\n\n :type name: str\n :param name: the name of the field.\n\n :type field_type: str\n :param field_type: the type of the field (one of 'STRING', 'INTEGER',\n 'FLOAT', 'BOOLEAN', 'TIMESTAMP' or 'RECORD').\n\n :type mode: str\n :param mode: the mode of the field (one of 'NULLABLE', 'REQUIRED',\n or 'REPEATED').\n\n :type description: str\n :param description: optional description for the field.\n\n :type fields: tuple of :class:`~google.cloud.bigquery.schema.SchemaField`\n :param fields: subfields (requires ``field_type`` of 'RECORD').\n \"\"\"\n def __init__(self, name, field_type, mode='NULLABLE',\n description=None, fields=()):\n self._name = name\n self._field_type = field_type\n self._mode = mode\n self._description = description\n self._fields = tuple(fields)\n\n @classmethod\n def from_api_repr(cls, api_repr):\n \"\"\"Return a ``SchemaField`` object deserialized from a dictionary.\n\n Args:\n api_repr (Mapping[str, str]): The serialized representation\n of the SchemaField, such as what is output by\n :meth:`to_api_repr`.\n\n Returns:\n google.cloud.biquery.schema.SchemaField:\n The ``SchemaField`` object.\n \"\"\"\n return cls(\n field_type=api_repr['type'].upper(),\n fields=[cls.from_api_repr(f) for f in api_repr.get('fields', ())],\n mode=api_repr['mode'].upper(),\n name=api_repr['name'],\n )\n\n @property\n def name(self):\n \"\"\"str: The name of the field.\"\"\"\n return 
self._name\n\n @property\n def field_type(self):\n \"\"\"str: The type of the field.\n\n Will be one of 'STRING', 'INTEGER', 'FLOAT', 'BOOLEAN',\n 'TIMESTAMP' or 'RECORD'.\n \"\"\"\n return self._field_type\n\n @property\n def mode(self):\n \"\"\"str: The mode of the field.\n\n Will be one of 'NULLABLE', 'REQUIRED', or 'REPEATED'.\n \"\"\"\n return self._mode\n\n @property\n def is_nullable(self):\n \"\"\"Check whether 'mode' is 'nullable'.\"\"\"\n return self._mode == 'NULLABLE'\n\n @property\n def description(self):\n \"\"\"Optional[str]: Description for the field.\"\"\"\n return self._description\n\n @property\n def fields(self):\n \"\"\"tuple: Subfields contained in this field.\n\n If ``field_type`` is not 'RECORD', this property must be\n empty / unset.\n \"\"\"\n return self._fields\n\n def to_api_repr(self):\n \"\"\"Return a dictionary representing this schema field.\n\n Returns:\n dict: A dictionary representing the SchemaField in a serialized\n form.\n \"\"\"\n # Put together the basic representation. See http://bit.ly/2hOAT5u.\n answer = {\n 'mode': self.mode.lower(),\n 'name': self.name,\n 'type': self.field_type.lower(),\n }\n\n # If this is a RECORD type, then sub-fields are also included,\n # add this to the serialized representation.\n if self.field_type.upper() == 'RECORD':\n answer['fields'] = [f.to_api_repr() for f in self.fields]\n\n # Done; return the serialized dictionary.\n return answer\n\n def _key(self):\n \"\"\"A tuple key that uniquely describes this field.\n\n Used to compute this instance's hashcode and evaluate equality.\n\n Returns:\n tuple: The contents of this\n :class:`~google.cloud.bigquery.schema.SchemaField`.\n \"\"\"\n return (\n self._name,\n self._field_type.lower(),\n self._mode,\n self._description,\n self._fields,\n )\n\n def __eq__(self, other):\n if not isinstance(other, SchemaField):\n return NotImplemented\n return self._key() == other._key()\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n return hash(self._key())\n\n def __repr__(self):\n return 'SchemaField{}'.format(self._key())\n\n\ndef _parse_schema_resource(info):\n \"\"\"Parse a resource fragment into a schema field.\n\n :type info: mapping\n :param info: should contain a \"fields\" key to be parsed\n\n :rtype:\n list of :class:`google.cloud.bigquery.schema.SchemaField`, or\n ``NoneType``\n :returns: a list of parsed fields, or ``None`` if no \"fields\" key is\n present in ``info``.\n \"\"\"\n if 'fields' not in info:\n return ()\n\n schema = []\n for r_field in info['fields']:\n name = r_field['name']\n field_type = r_field['type']\n mode = r_field.get('mode', 'NULLABLE')\n description = r_field.get('description')\n sub_fields = _parse_schema_resource(r_field)\n schema.append(\n SchemaField(name, field_type, mode, description, sub_fields))\n return schema\n\n\ndef _build_schema_resource(fields):\n \"\"\"Generate a resource fragment for a schema.\n\n :type fields:\n sequence of :class:`~google.cloud.bigquery.schema.SchemaField`\n :param fields: schema to be dumped\n\n :rtype: mapping\n :returns: a mapping describing the schema of the supplied fields.\n \"\"\"\n infos = []\n for field in fields:\n info = {'name': field.name,\n 'type': field.field_type,\n 'mode': field.mode}\n if field.description is not None:\n info['description'] = field.description\n if field.fields:\n info['fields'] = _build_schema_resource(field.fields)\n infos.append(info)\n return infos\n", "path": "bigquery/google/cloud/bigquery/schema.py"}]} | 2,663 | 191 |
gh_patches_debug_26000 | rasdani/github-patches | git_diff | oppia__oppia-17178 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG]: Quiz does not automatically move to next question on input
### Describe the bug
I was taking a lesson on decimal place values and when I input a correct answer, I get a message that
_"Note: When you select an option, the quiz will automatically move to the next question."_
But it doesn't move. It still asks me to click on 'continue'

### Steps To Reproduce
1. Go Math Lessons
2. Click on Decimals (This occurs in other lessons too)
3. Take the introductory quizzes
4. Take quiz on Decimals
5. Answer it correctly
6. See error
### Expected Behavior
I expect to be taken to next page when I get my answer correctly, as displayed by the message.
### Screenshots/Videos

### What device are you using?
Desktop
### Operating System
Windows
### What browsers are you seeing the problem on?
Chrome
### Browser version
Version 111.0.5563.65 (Official Build) (64-bit)
### Additional context
_No response_
[BUG]: Quiz does not automatically move to next question on input
### Describe the bug
I was taking a lesson on decimal place values and when I input a correct answer, I get a message that
_"Note: When you select an option, the quiz will automatically move to the next question."_
But it doesn't move. It still asks me to click on 'continue'

### Steps To Reproduce
1. Go Math Lessons
2. Click on Decimals (This occurs in other lessons too)
3. Take the introductory quizzes
4. Take quiz on Decimals
5. Answer it correctly
6. See error
### Expected Behavior
I expect to be taken to next page when I get my answer correctly, as displayed by the message.
### Screenshots/Videos

### What device are you using?
Desktop
### Operating System
Windows
### What browsers are you seeing the problem on?
Chrome
### Browser version
Version 111.0.5563.65 (Official Build) (64-bit)
### Additional context
_No response_
</issue>
<code>
[start of extensions/interactions/ItemSelectionInput/ItemSelectionInput.py]
1 # coding: utf-8
2 #
3 # Copyright 2014 The Oppia Authors. All Rights Reserved.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, softwar
12 # distributed under the License is distributed on an "AS-IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 """Python configuration for ItemSelectionInput interaction."""
18
19 from __future__ import annotations
20
21 from extensions.interactions import base
22
23 from typing import List
24
25 MYPY = False
26 if MYPY: # pragma: no cover
27 from extensions import domain
28
29
30 class ItemSelectionInput(base.BaseInteraction):
31 """Interaction for item selection input."""
32
33 name: str = 'Item Selection'
34 description: str = (
35 'Allows learners to select various options.')
36 display_mode: str = base.DISPLAY_MODE_INLINE
37 _dependency_ids: List[str] = []
38 answer_type: str = 'SetOfTranslatableHtmlContentIds'
39 # Radio buttons get unselected when specifying a solution. This needs to be
40 # fixed before solution feature can support this interaction.
41 can_have_solution: bool = False
42 # ItemSelectionInput's submit button is dynamic and is handled
43 # separately.
44 show_generic_submit_button: bool = False
45
46 _customization_arg_specs: List[domain.CustomizationArgSpecsDict] = [{
47 'name': 'minAllowableSelectionCount',
48 'description': 'Minimum number of selections permitted',
49 'schema': {
50 'type': 'int',
51 'validators': [{
52 'id': 'is_at_least',
53 'min_value': 0,
54 }],
55 },
56 'default_value': 1,
57 }, {
58 'name': 'maxAllowableSelectionCount',
59 'description': 'Maximum number of selections permitted',
60 'schema': {
61 'type': 'int',
62 'validators': [{
63 'id': 'is_at_least',
64 'min_value': 1,
65 }],
66 },
67 'default_value': 1,
68 }, {
69 'name': 'choices',
70 'description': 'Items for selection',
71 'schema': {
72 'type': 'list',
73 'validators': [{
74 'id': 'has_unique_subtitled_contents'
75 }],
76 'items': {
77 'type': 'custom',
78 'obj_type': 'SubtitledHtml',
79 'validators': [{
80 'id': 'has_subtitled_html_non_empty'
81 }],
82 'replacement_ui_config': {
83 'html': {
84 'hide_complex_extensions': True,
85 'placeholder': 'Sample item answer',
86 }
87 }
88 },
89 'ui_config': {
90 'add_element_text': 'Add item for selection',
91 }
92 },
93 'default_value': [{
94 'content_id': None,
95 'html': ''
96 }],
97 }]
98
99 _answer_visualization_specs: List[base.AnswerVisualizationSpecsDict] = [{
100 # Table with keyed answer counts for top N answers.
101 'id': 'EnumeratedFrequencyTable',
102 'options': {
103 'column_headers': ['Answer (click to expand/collapse)', 'Count'],
104 'title': 'Top answers',
105 },
106 'calculation_id': 'Top10AnswerFrequencies',
107 'addressed_info_is_supported': True,
108 }]
109
[end of extensions/interactions/ItemSelectionInput/ItemSelectionInput.py]
[start of extensions/interactions/MultipleChoiceInput/MultipleChoiceInput.py]
1 # coding: utf-8
2 #
3 # Copyright 2014 The Oppia Authors. All Rights Reserved.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, softwar
12 # distributed under the License is distributed on an "AS-IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 """Python configuration for MultipleChoiceInput interaction."""
18
19 from __future__ import annotations
20
21 from extensions.interactions import base
22
23 from typing import List, Optional
24
25 MYPY = False
26 if MYPY: # pragma: no cover
27 from extensions import domain
28
29
30 class MultipleChoiceInput(base.BaseInteraction):
31 """Interaction for multiple choice input."""
32
33 name: str = 'Multiple Choice'
34 description: str = (
35 'Allows learners to select one of a list of multiple-choice options.')
36 display_mode: str = base.DISPLAY_MODE_INLINE
37 _dependency_ids: List[str] = []
38 answer_type: str = 'NonnegativeInt'
39 instructions: Optional[str] = None
40 narrow_instructions: Optional[str] = None
41 needs_summary: bool = False
42 # Radio buttons get unselected when specifying a solution. This needs to be
43 # fixed before solution feature can support this interaction.
44 can_have_solution: bool = False
45 show_generic_submit_button: bool = False
46
47 _customization_arg_specs: List[domain.CustomizationArgSpecsDict] = [{
48 'name': 'choices',
49 'description': 'Multiple Choice options',
50 'schema': {
51 'type': 'list',
52 'validators': [
53 {
54 'id': 'has_length_at_least',
55 'min_value': 1,
56 },
57 {
58 'id': 'has_unique_subtitled_contents'
59 }
60 ],
61 'items': {
62 'type': 'custom',
63 'obj_type': 'SubtitledHtml',
64 'validators': [{
65 'id': 'has_subtitled_html_non_empty'
66 }],
67 'replacement_ui_config': {
68 'html': {
69 'hide_complex_extensions': True,
70 'placeholder': (
71 'Enter an option for the learner to select'),
72 }
73 }
74 },
75 'ui_config': {
76 'add_element_text': 'Add multiple choice option',
77 }
78 },
79 'default_value': [{
80 'content_id': None,
81 'html': ''
82 }],
83 }, {
84 'name': 'showChoicesInShuffledOrder',
85 'description': 'Shuffle answer choices',
86 'schema': {
87 'type': 'bool',
88 },
89 'default_value': True
90 }]
91
92 _answer_visualization_specs: List[base.AnswerVisualizationSpecsDict] = [{
93 'id': 'SortedTiles',
94 'options': {'header': 'Top answers', 'use_percentages': True},
95 'calculation_id': 'AnswerFrequencies',
96 'addressed_info_is_supported': True,
97 }]
98
[end of extensions/interactions/MultipleChoiceInput/MultipleChoiceInput.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/extensions/interactions/ItemSelectionInput/ItemSelectionInput.py b/extensions/interactions/ItemSelectionInput/ItemSelectionInput.py
--- a/extensions/interactions/ItemSelectionInput/ItemSelectionInput.py
+++ b/extensions/interactions/ItemSelectionInput/ItemSelectionInput.py
@@ -39,9 +39,8 @@
# Radio buttons get unselected when specifying a solution. This needs to be
# fixed before solution feature can support this interaction.
can_have_solution: bool = False
- # ItemSelectionInput's submit button is dynamic and is handled
- # separately.
- show_generic_submit_button: bool = False
+ # ItemSelectionInput interaction must contain a generic submit button.
+ show_generic_submit_button: bool = True
_customization_arg_specs: List[domain.CustomizationArgSpecsDict] = [{
'name': 'minAllowableSelectionCount',
diff --git a/extensions/interactions/MultipleChoiceInput/MultipleChoiceInput.py b/extensions/interactions/MultipleChoiceInput/MultipleChoiceInput.py
--- a/extensions/interactions/MultipleChoiceInput/MultipleChoiceInput.py
+++ b/extensions/interactions/MultipleChoiceInput/MultipleChoiceInput.py
@@ -42,7 +42,8 @@
# Radio buttons get unselected when specifying a solution. This needs to be
# fixed before solution feature can support this interaction.
can_have_solution: bool = False
- show_generic_submit_button: bool = False
+ # MultipleChoiceInput interaction must contain a generic submit button.
+ show_generic_submit_button: bool = True
_customization_arg_specs: List[domain.CustomizationArgSpecsDict] = [{
'name': 'choices',
| {"golden_diff": "diff --git a/extensions/interactions/ItemSelectionInput/ItemSelectionInput.py b/extensions/interactions/ItemSelectionInput/ItemSelectionInput.py\n--- a/extensions/interactions/ItemSelectionInput/ItemSelectionInput.py\n+++ b/extensions/interactions/ItemSelectionInput/ItemSelectionInput.py\n@@ -39,9 +39,8 @@\n # Radio buttons get unselected when specifying a solution. This needs to be\n # fixed before solution feature can support this interaction.\n can_have_solution: bool = False\n- # ItemSelectionInput's submit button is dynamic and is handled\n- # separately.\n- show_generic_submit_button: bool = False\n+ # ItemSelectionInput interaction must contain a generic submit button.\n+ show_generic_submit_button: bool = True\n \n _customization_arg_specs: List[domain.CustomizationArgSpecsDict] = [{\n 'name': 'minAllowableSelectionCount',\ndiff --git a/extensions/interactions/MultipleChoiceInput/MultipleChoiceInput.py b/extensions/interactions/MultipleChoiceInput/MultipleChoiceInput.py\n--- a/extensions/interactions/MultipleChoiceInput/MultipleChoiceInput.py\n+++ b/extensions/interactions/MultipleChoiceInput/MultipleChoiceInput.py\n@@ -42,7 +42,8 @@\n # Radio buttons get unselected when specifying a solution. This needs to be\n # fixed before solution feature can support this interaction.\n can_have_solution: bool = False\n- show_generic_submit_button: bool = False\n+ # MultipleChoiceInput interaction must contain a generic submit button.\n+ show_generic_submit_button: bool = True\n \n _customization_arg_specs: List[domain.CustomizationArgSpecsDict] = [{\n 'name': 'choices',\n", "issue": "[BUG]: Quiz does not automatically move to next question on input\n### Describe the bug\n\nI was taking a lesson on decimal place values and when I input a correct answer, I get a message that \r\n\r\n_\"Note: When you select an option, the quiz will automatically move to the next question.\"_\r\n\r\nBut it doesn't move. It still asks me to click on 'continue'\r\n\r\n\r\n\n\n### Steps To Reproduce\n\n1. Go Math Lessons\r\n2. Click on Decimals (This occurs in other lessons too)\r\n3. Take the introductory quizzes\r\n4. Take quiz on Decimals\r\n5. Answer it correctly\r\n6. See error\n\n### Expected Behavior\n\nI expect to be taken to next page when I get my answer correctly, as displayed by the message.\n\n### Screenshots/Videos\n\n\r\n\n\n### What device are you using?\n\nDesktop\n\n### Operating System\n\nWindows\n\n### What browsers are you seeing the problem on?\n\nChrome\n\n### Browser version\n\nVersion 111.0.5563.65 (Official Build) (64-bit)\n\n### Additional context\n\n_No response_\n[BUG]: Quiz does not automatically move to next question on input\n### Describe the bug\n\nI was taking a lesson on decimal place values and when I input a correct answer, I get a message that \r\n\r\n_\"Note: When you select an option, the quiz will automatically move to the next question.\"_\r\n\r\nBut it doesn't move. It still asks me to click on 'continue'\r\n\r\n\r\n\n\n### Steps To Reproduce\n\n1. Go Math Lessons\r\n2. Click on Decimals (This occurs in other lessons too)\r\n3. Take the introductory quizzes\r\n4. Take quiz on Decimals\r\n5. Answer it correctly\r\n6. 
See error\n\n### Expected Behavior\n\nI expect to be taken to next page when I get my answer correctly, as displayed by the message.\n\n### Screenshots/Videos\n\n\r\n\n\n### What device are you using?\n\nDesktop\n\n### Operating System\n\nWindows\n\n### What browsers are you seeing the problem on?\n\nChrome\n\n### Browser version\n\nVersion 111.0.5563.65 (Official Build) (64-bit)\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softwar\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Python configuration for ItemSelectionInput interaction.\"\"\"\n\nfrom __future__ import annotations\n\nfrom extensions.interactions import base\n\nfrom typing import List\n\nMYPY = False\nif MYPY: # pragma: no cover\n from extensions import domain\n\n\nclass ItemSelectionInput(base.BaseInteraction):\n \"\"\"Interaction for item selection input.\"\"\"\n\n name: str = 'Item Selection'\n description: str = (\n 'Allows learners to select various options.')\n display_mode: str = base.DISPLAY_MODE_INLINE\n _dependency_ids: List[str] = []\n answer_type: str = 'SetOfTranslatableHtmlContentIds'\n # Radio buttons get unselected when specifying a solution. This needs to be\n # fixed before solution feature can support this interaction.\n can_have_solution: bool = False\n # ItemSelectionInput's submit button is dynamic and is handled\n # separately.\n show_generic_submit_button: bool = False\n\n _customization_arg_specs: List[domain.CustomizationArgSpecsDict] = [{\n 'name': 'minAllowableSelectionCount',\n 'description': 'Minimum number of selections permitted',\n 'schema': {\n 'type': 'int',\n 'validators': [{\n 'id': 'is_at_least',\n 'min_value': 0,\n }],\n },\n 'default_value': 1,\n }, {\n 'name': 'maxAllowableSelectionCount',\n 'description': 'Maximum number of selections permitted',\n 'schema': {\n 'type': 'int',\n 'validators': [{\n 'id': 'is_at_least',\n 'min_value': 1,\n }],\n },\n 'default_value': 1,\n }, {\n 'name': 'choices',\n 'description': 'Items for selection',\n 'schema': {\n 'type': 'list',\n 'validators': [{\n 'id': 'has_unique_subtitled_contents'\n }],\n 'items': {\n 'type': 'custom',\n 'obj_type': 'SubtitledHtml',\n 'validators': [{\n 'id': 'has_subtitled_html_non_empty'\n }],\n 'replacement_ui_config': {\n 'html': {\n 'hide_complex_extensions': True,\n 'placeholder': 'Sample item answer',\n }\n }\n },\n 'ui_config': {\n 'add_element_text': 'Add item for selection',\n }\n },\n 'default_value': [{\n 'content_id': None,\n 'html': ''\n }],\n }]\n\n _answer_visualization_specs: List[base.AnswerVisualizationSpecsDict] = [{\n # Table with keyed answer counts for top N answers.\n 'id': 'EnumeratedFrequencyTable',\n 'options': {\n 'column_headers': ['Answer (click to expand/collapse)', 'Count'],\n 'title': 'Top answers',\n },\n 'calculation_id': 'Top10AnswerFrequencies',\n 'addressed_info_is_supported': True,\n }]\n", "path": "extensions/interactions/ItemSelectionInput/ItemSelectionInput.py"}, {"content": "# 
coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softwar\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Python configuration for MultipleChoiceInput interaction.\"\"\"\n\nfrom __future__ import annotations\n\nfrom extensions.interactions import base\n\nfrom typing import List, Optional\n\nMYPY = False\nif MYPY: # pragma: no cover\n from extensions import domain\n\n\nclass MultipleChoiceInput(base.BaseInteraction):\n \"\"\"Interaction for multiple choice input.\"\"\"\n\n name: str = 'Multiple Choice'\n description: str = (\n 'Allows learners to select one of a list of multiple-choice options.')\n display_mode: str = base.DISPLAY_MODE_INLINE\n _dependency_ids: List[str] = []\n answer_type: str = 'NonnegativeInt'\n instructions: Optional[str] = None\n narrow_instructions: Optional[str] = None\n needs_summary: bool = False\n # Radio buttons get unselected when specifying a solution. This needs to be\n # fixed before solution feature can support this interaction.\n can_have_solution: bool = False\n show_generic_submit_button: bool = False\n\n _customization_arg_specs: List[domain.CustomizationArgSpecsDict] = [{\n 'name': 'choices',\n 'description': 'Multiple Choice options',\n 'schema': {\n 'type': 'list',\n 'validators': [\n {\n 'id': 'has_length_at_least',\n 'min_value': 1,\n },\n {\n 'id': 'has_unique_subtitled_contents'\n }\n ],\n 'items': {\n 'type': 'custom',\n 'obj_type': 'SubtitledHtml',\n 'validators': [{\n 'id': 'has_subtitled_html_non_empty'\n }],\n 'replacement_ui_config': {\n 'html': {\n 'hide_complex_extensions': True,\n 'placeholder': (\n 'Enter an option for the learner to select'),\n }\n }\n },\n 'ui_config': {\n 'add_element_text': 'Add multiple choice option',\n }\n },\n 'default_value': [{\n 'content_id': None,\n 'html': ''\n }],\n }, {\n 'name': 'showChoicesInShuffledOrder',\n 'description': 'Shuffle answer choices',\n 'schema': {\n 'type': 'bool',\n },\n 'default_value': True\n }]\n\n _answer_visualization_specs: List[base.AnswerVisualizationSpecsDict] = [{\n 'id': 'SortedTiles',\n 'options': {'header': 'Top answers', 'use_percentages': True},\n 'calculation_id': 'AnswerFrequencies',\n 'addressed_info_is_supported': True,\n }]\n", "path": "extensions/interactions/MultipleChoiceInput/MultipleChoiceInput.py"}]} | 3,183 | 374 |
gh_patches_debug_8057 | rasdani/github-patches | git_diff | dmlc__dgl-1478 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Loading dataset PROTEINS leads to IndexError
## 🐛 Bug
<!-- A clear and concise description of what the bug is. -->
When loading dataset PROTEINS using function `LegacyTUDataset()`, raise `IndexError: too many indices for array`
## To Reproduce
Steps to reproduce the behavior:
```python
from dgl.data import LegacyTUDataset
dataset = LegacyTUDataset(name='PROTEINS')
```
The error message as following:
```
~/opt/anaconda3/lib/python3.7/site-packages/dgl/data/tu.py in __init__(self, name, use_pandas, hidden_size, max_allow_node)
78 self._file_path("node_attributes"), delimiter=",")
79 for idxs, g in zip(node_idx_list, self.graph_lists):
---> 80 g.ndata['feat'] = DS_node_attr[idxs, :]
81 self.data_mode = "node_attr"
82 except IOError:
IndexError: too many indices for array
```
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
## Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
Successfully loading PROTEINS dataset
## Environment
- DGL Version (e.g., 1.0): 0.4.2
- Backend Library & Version (e.g., PyTorch 0.4.1, MXNet/Gluon 1.3): PyTorch
- OS (e.g., Linux): MacOS X
- How you installed DGL (`conda`, `pip`, source): pip
- Build command you used (if compiling from source):
- Python version: 3.7
- CUDA/cuDNN version (if applicable):
- GPU models and configuration (e.g. V100):
- Any other relevant information:
## Additional context
<!-- Add any other context about the problem here. -->
</issue>
<code>
[start of python/dgl/data/tu.py]
1 from __future__ import absolute_import
2 import numpy as np
3 import os
4 import random
5
6 from .utils import download, extract_archive, get_download_dir, loadtxt
7 from ..graph import DGLGraph
8
9 class LegacyTUDataset(object):
10 """
11 TUDataset contains lots of graph kernel datasets for graph classification.
12 Use provided node feature by default. If no feature provided, use one-hot node label instead.
13 If neither labels provided, use constant for node feature.
14
15 :param name: Dataset Name, such as `ENZYMES`, `DD`, `COLLAB`
16 :param use_pandas: Default: False.
17 Numpy's file read function has performance issue when file is large,
18 using pandas can be faster.
19 :param hidden_size: Default 10. Some dataset doesn't contain features.
20 Use constant node features initialization instead, with hidden size as `hidden_size`.
21
22 """
23
24 _url = r"https://ls11-www.cs.tu-dortmund.de/people/morris/graphkerneldatasets/{}.zip"
25
26 def __init__(self, name, use_pandas=False,
27 hidden_size=10, max_allow_node=None):
28
29 self.name = name
30 self.hidden_size = hidden_size
31 self.extract_dir = self._download()
32 self.data_mode = None
33 self.max_allow_node = max_allow_node
34
35 if use_pandas:
36 import pandas as pd
37 DS_edge_list = self._idx_from_zero(
38 pd.read_csv(self._file_path("A"), delimiter=",", dtype=int, header=None).values)
39 else:
40 DS_edge_list = self._idx_from_zero(
41 np.genfromtxt(self._file_path("A"), delimiter=",", dtype=int))
42
43 DS_indicator = self._idx_from_zero(
44 np.genfromtxt(self._file_path("graph_indicator"), dtype=int))
45 DS_graph_labels = self._idx_from_zero(
46 np.genfromtxt(self._file_path("graph_labels"), dtype=int))
47
48 g = DGLGraph()
49 g.add_nodes(int(DS_edge_list.max()) + 1)
50 g.add_edges(DS_edge_list[:, 0], DS_edge_list[:, 1])
51
52 node_idx_list = []
53 self.max_num_node = 0
54 for idx in range(np.max(DS_indicator) + 1):
55 node_idx = np.where(DS_indicator == idx)
56 node_idx_list.append(node_idx[0])
57 if len(node_idx[0]) > self.max_num_node:
58 self.max_num_node = len(node_idx[0])
59
60 self.graph_lists = g.subgraphs(node_idx_list)
61 self.num_labels = max(DS_graph_labels) + 1
62 self.graph_labels = DS_graph_labels
63
64 try:
65 DS_node_labels = self._idx_from_zero(
66 np.loadtxt(self._file_path("node_labels"), dtype=int))
67 g.ndata['node_label'] = DS_node_labels
68 one_hot_node_labels = self._to_onehot(DS_node_labels)
69 for idxs, g in zip(node_idx_list, self.graph_lists):
70 g.ndata['feat'] = one_hot_node_labels[idxs, :]
71 self.data_mode = "node_label"
72 except IOError:
73 print("No Node Label Data")
74
75 try:
76 DS_node_attr = np.loadtxt(
77 self._file_path("node_attributes"), delimiter=",")
78 for idxs, g in zip(node_idx_list, self.graph_lists):
79 g.ndata['feat'] = DS_node_attr[idxs, :]
80 self.data_mode = "node_attr"
81 except IOError:
82 print("No Node Attribute Data")
83
84 if 'feat' not in g.ndata.keys():
85 for idxs, g in zip(node_idx_list, self.graph_lists):
86 g.ndata['feat'] = np.ones((g.number_of_nodes(), hidden_size))
87 self.data_mode = "constant"
88 print(
89 "Use Constant one as Feature with hidden size {}".format(hidden_size))
90
91 # remove graphs that are too large by user given standard
92 # optional pre-processing steop in conformity with Rex Ying's original
93 # DiffPool implementation
94 if self.max_allow_node:
95 preserve_idx = []
96 print("original dataset length : ", len(self.graph_lists))
97 for (i, g) in enumerate(self.graph_lists):
98 if g.number_of_nodes() <= self.max_allow_node:
99 preserve_idx.append(i)
100 self.graph_lists = [self.graph_lists[i] for i in preserve_idx]
101 print(
102 "after pruning graphs that are too big : ", len(
103 self.graph_lists))
104 self.graph_labels = [self.graph_labels[i] for i in preserve_idx]
105 self.max_num_node = self.max_allow_node
106
107 def __getitem__(self, idx):
108 """Get the i^th sample.
109 Paramters
110 ---------
111 idx : int
112 The sample index.
113 Returns
114 -------
115 (dgl.DGLGraph, int)
116 DGLGraph with node feature stored in `feat` field and node label in `node_label` if available.
117 And its label.
118 """
119 g = self.graph_lists[idx]
120 return g, self.graph_labels[idx]
121
122 def __len__(self):
123 return len(self.graph_lists)
124
125 def _download(self):
126 download_dir = get_download_dir()
127 zip_file_path = os.path.join(
128 download_dir,
129 "tu_{}.zip".format(
130 self.name))
131 download(self._url.format(self.name), path=zip_file_path)
132 extract_dir = os.path.join(download_dir, "tu_{}".format(self.name))
133 extract_archive(zip_file_path, extract_dir)
134 return extract_dir
135
136 def _file_path(self, category):
137 return os.path.join(self.extract_dir, self.name,
138 "{}_{}.txt".format(self.name, category))
139
140 @staticmethod
141 def _idx_from_zero(idx_tensor):
142 return idx_tensor - np.min(idx_tensor)
143
144 @staticmethod
145 def _to_onehot(label_tensor):
146 label_num = label_tensor.shape[0]
147 assert np.min(label_tensor) == 0
148 one_hot_tensor = np.zeros((label_num, np.max(label_tensor) + 1))
149 one_hot_tensor[np.arange(label_num), label_tensor] = 1
150 return one_hot_tensor
151
152 def statistics(self):
153 return self.graph_lists[0].ndata['feat'].shape[1],\
154 self.num_labels,\
155 self.max_num_node
156
157
158 class TUDataset(object):
159 """
160 TUDataset contains lots of graph kernel datasets for graph classification.
161 Graphs may have node labels, node attributes, edge labels, and edge attributes,
162 varing from different dataset.
163
164 :param name: Dataset Name, such as `ENZYMES`, `DD`, `COLLAB`, `MUTAG`, can be the
165 datasets name on https://ls11-www.cs.tu-dortmund.de/staff/morris/graphkerneldatasets.
166 """
167
168 _url = r"https://ls11-www.cs.tu-dortmund.de/people/morris/graphkerneldatasets/{}.zip"
169
170 def __init__(self, name):
171
172 self.name = name
173 self.extract_dir = self._download()
174
175 DS_edge_list = self._idx_from_zero(
176 loadtxt(self._file_path("A"), delimiter=",").astype(int))
177 DS_indicator = self._idx_from_zero(
178 loadtxt(self._file_path("graph_indicator"), delimiter=",").astype(int))
179 DS_graph_labels = self._idx_from_zero(
180 loadtxt(self._file_path("graph_labels"), delimiter=",").astype(int))
181
182 g = DGLGraph()
183 g.add_nodes(int(DS_edge_list.max()) + 1)
184 g.add_edges(DS_edge_list[:, 0], DS_edge_list[:, 1])
185
186 node_idx_list = []
187 self.max_num_node = 0
188 for idx in range(np.max(DS_indicator) + 1):
189 node_idx = np.where(DS_indicator == idx)
190 node_idx_list.append(node_idx[0])
191 if len(node_idx[0]) > self.max_num_node:
192 self.max_num_node = len(node_idx[0])
193
194 self.num_labels = max(DS_graph_labels) + 1
195 self.graph_labels = DS_graph_labels
196
197 self.attr_dict = {
198 'node_labels': ('ndata', 'node_labels'),
199 'node_attributes': ('ndata', 'node_attr'),
200 'edge_labels': ('edata', 'edge_labels'),
201 'edge_attributes': ('edata', 'node_labels'),
202 }
203
204 for filename, field_name in self.attr_dict.items():
205 try:
206 data = loadtxt(self._file_path(filename),
207 delimiter=',').astype(int)
208 if 'label' in filename:
209 data = self._idx_from_zero(data)
210 getattr(g, field_name[0])[field_name[1]] = data
211 except IOError:
212 pass
213
214 self.graph_lists = g.subgraphs(node_idx_list)
215 for g in self.graph_lists:
216 g.copy_from_parent()
217
218 def __getitem__(self, idx):
219 """Get the i^th sample.
220 Paramters
221 ---------
222 idx : int
223 The sample index.
224 Returns
225 -------
226 (dgl.DGLGraph, int)
227 DGLGraph with node feature stored in `feat` field and node label in `node_label` if available.
228 And its label.
229 """
230 g = self.graph_lists[idx]
231 return g, self.graph_labels[idx]
232
233 def __len__(self):
234 return len(self.graph_lists)
235
236 def _download(self):
237 download_dir = get_download_dir()
238 zip_file_path = os.path.join(
239 download_dir,
240 "tu_{}.zip".format(
241 self.name))
242 download(self._url.format(self.name), path=zip_file_path)
243 extract_dir = os.path.join(download_dir, "tu_{}".format(self.name))
244 extract_archive(zip_file_path, extract_dir)
245 return extract_dir
246
247 def _file_path(self, category):
248 return os.path.join(self.extract_dir, self.name,
249 "{}_{}.txt".format(self.name, category))
250
251 @staticmethod
252 def _idx_from_zero(idx_tensor):
253 return idx_tensor - np.min(idx_tensor)
254
255 def statistics(self):
256 return self.graph_lists[0].ndata['feat'].shape[1], \
257 self.num_labels, \
258 self.max_num_node
259
[end of python/dgl/data/tu.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/dgl/data/tu.py b/python/dgl/data/tu.py
--- a/python/dgl/data/tu.py
+++ b/python/dgl/data/tu.py
@@ -75,6 +75,8 @@
try:
DS_node_attr = np.loadtxt(
self._file_path("node_attributes"), delimiter=",")
+ if DS_node_attr.ndim == 1:
+ DS_node_attr = np.expand_dims(DS_node_attr, -1)
for idxs, g in zip(node_idx_list, self.graph_lists):
g.ndata['feat'] = DS_node_attr[idxs, :]
self.data_mode = "node_attr"
| {"golden_diff": "diff --git a/python/dgl/data/tu.py b/python/dgl/data/tu.py\n--- a/python/dgl/data/tu.py\n+++ b/python/dgl/data/tu.py\n@@ -75,6 +75,8 @@\n try:\n DS_node_attr = np.loadtxt(\n self._file_path(\"node_attributes\"), delimiter=\",\")\n+ if DS_node_attr.ndim == 1:\n+ DS_node_attr = np.expand_dims(DS_node_attr, -1)\n for idxs, g in zip(node_idx_list, self.graph_lists):\n g.ndata['feat'] = DS_node_attr[idxs, :]\n self.data_mode = \"node_attr\"\n", "issue": "Loading dataset PROTEINS leads to IndexError\n## \ud83d\udc1b Bug\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\nWhen loading dataset PROTEINS using function `LegacyTUDataset()`, raise `IndexError: too many indices for array`\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n```python\r\nfrom dgl.data import LegacyTUDataset\r\n\r\ndataset = LegacyTUDataset(name='PROTEINS')\r\n ```\r\nThe error message as following:\r\n```\r\n~/opt/anaconda3/lib/python3.7/site-packages/dgl/data/tu.py in __init__(self, name, use_pandas, hidden_size, max_allow_node)\r\n 78 self._file_path(\"node_attributes\"), delimiter=\",\")\r\n 79 for idxs, g in zip(node_idx_list, self.graph_lists):\r\n---> 80 g.ndata['feat'] = DS_node_attr[idxs, :]\r\n 81 self.data_mode = \"node_attr\"\r\n 82 except IOError:\r\n\r\nIndexError: too many indices for array\r\n```\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n\r\n## Expected behavior\r\n\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nSuccessfully loading PROTEINS dataset\r\n## Environment\r\n\r\n - DGL Version (e.g., 1.0): 0.4.2\r\n - Backend Library & Version (e.g., PyTorch 0.4.1, MXNet/Gluon 1.3): PyTorch\r\n - OS (e.g., Linux): MacOS X\r\n - How you installed DGL (`conda`, `pip`, source): pip\r\n - Build command you used (if compiling from source):\r\n - Python version: 3.7\r\n - CUDA/cuDNN version (if applicable):\r\n - GPU models and configuration (e.g. V100):\r\n - Any other relevant information:\r\n\r\n## Additional context\r\n\r\n<!-- Add any other context about the problem here. -->\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\nimport numpy as np\nimport os\nimport random\n\nfrom .utils import download, extract_archive, get_download_dir, loadtxt\nfrom ..graph import DGLGraph\n\nclass LegacyTUDataset(object):\n \"\"\"\n TUDataset contains lots of graph kernel datasets for graph classification.\n Use provided node feature by default. If no feature provided, use one-hot node label instead.\n If neither labels provided, use constant for node feature.\n\n :param name: Dataset Name, such as `ENZYMES`, `DD`, `COLLAB`\n :param use_pandas: Default: False.\n Numpy's file read function has performance issue when file is large,\n using pandas can be faster.\n :param hidden_size: Default 10. 
Some dataset doesn't contain features.\n Use constant node features initialization instead, with hidden size as `hidden_size`.\n\n \"\"\"\n\n _url = r\"https://ls11-www.cs.tu-dortmund.de/people/morris/graphkerneldatasets/{}.zip\"\n\n def __init__(self, name, use_pandas=False,\n hidden_size=10, max_allow_node=None):\n\n self.name = name\n self.hidden_size = hidden_size\n self.extract_dir = self._download()\n self.data_mode = None\n self.max_allow_node = max_allow_node\n\n if use_pandas:\n import pandas as pd\n DS_edge_list = self._idx_from_zero(\n pd.read_csv(self._file_path(\"A\"), delimiter=\",\", dtype=int, header=None).values)\n else:\n DS_edge_list = self._idx_from_zero(\n np.genfromtxt(self._file_path(\"A\"), delimiter=\",\", dtype=int))\n\n DS_indicator = self._idx_from_zero(\n np.genfromtxt(self._file_path(\"graph_indicator\"), dtype=int))\n DS_graph_labels = self._idx_from_zero(\n np.genfromtxt(self._file_path(\"graph_labels\"), dtype=int))\n\n g = DGLGraph()\n g.add_nodes(int(DS_edge_list.max()) + 1)\n g.add_edges(DS_edge_list[:, 0], DS_edge_list[:, 1])\n\n node_idx_list = []\n self.max_num_node = 0\n for idx in range(np.max(DS_indicator) + 1):\n node_idx = np.where(DS_indicator == idx)\n node_idx_list.append(node_idx[0])\n if len(node_idx[0]) > self.max_num_node:\n self.max_num_node = len(node_idx[0])\n\n self.graph_lists = g.subgraphs(node_idx_list)\n self.num_labels = max(DS_graph_labels) + 1\n self.graph_labels = DS_graph_labels\n\n try:\n DS_node_labels = self._idx_from_zero(\n np.loadtxt(self._file_path(\"node_labels\"), dtype=int))\n g.ndata['node_label'] = DS_node_labels\n one_hot_node_labels = self._to_onehot(DS_node_labels)\n for idxs, g in zip(node_idx_list, self.graph_lists):\n g.ndata['feat'] = one_hot_node_labels[idxs, :]\n self.data_mode = \"node_label\"\n except IOError:\n print(\"No Node Label Data\")\n\n try:\n DS_node_attr = np.loadtxt(\n self._file_path(\"node_attributes\"), delimiter=\",\")\n for idxs, g in zip(node_idx_list, self.graph_lists):\n g.ndata['feat'] = DS_node_attr[idxs, :]\n self.data_mode = \"node_attr\"\n except IOError:\n print(\"No Node Attribute Data\")\n\n if 'feat' not in g.ndata.keys():\n for idxs, g in zip(node_idx_list, self.graph_lists):\n g.ndata['feat'] = np.ones((g.number_of_nodes(), hidden_size))\n self.data_mode = \"constant\"\n print(\n \"Use Constant one as Feature with hidden size {}\".format(hidden_size))\n\n # remove graphs that are too large by user given standard\n # optional pre-processing steop in conformity with Rex Ying's original\n # DiffPool implementation\n if self.max_allow_node:\n preserve_idx = []\n print(\"original dataset length : \", len(self.graph_lists))\n for (i, g) in enumerate(self.graph_lists):\n if g.number_of_nodes() <= self.max_allow_node:\n preserve_idx.append(i)\n self.graph_lists = [self.graph_lists[i] for i in preserve_idx]\n print(\n \"after pruning graphs that are too big : \", len(\n self.graph_lists))\n self.graph_labels = [self.graph_labels[i] for i in preserve_idx]\n self.max_num_node = self.max_allow_node\n\n def __getitem__(self, idx):\n \"\"\"Get the i^th sample.\n Paramters\n ---------\n idx : int\n The sample index.\n Returns\n -------\n (dgl.DGLGraph, int)\n DGLGraph with node feature stored in `feat` field and node label in `node_label` if available.\n And its label.\n \"\"\"\n g = self.graph_lists[idx]\n return g, self.graph_labels[idx]\n\n def __len__(self):\n return len(self.graph_lists)\n\n def _download(self):\n download_dir = get_download_dir()\n zip_file_path = os.path.join(\n 
download_dir,\n \"tu_{}.zip\".format(\n self.name))\n download(self._url.format(self.name), path=zip_file_path)\n extract_dir = os.path.join(download_dir, \"tu_{}\".format(self.name))\n extract_archive(zip_file_path, extract_dir)\n return extract_dir\n\n def _file_path(self, category):\n return os.path.join(self.extract_dir, self.name,\n \"{}_{}.txt\".format(self.name, category))\n\n @staticmethod\n def _idx_from_zero(idx_tensor):\n return idx_tensor - np.min(idx_tensor)\n\n @staticmethod\n def _to_onehot(label_tensor):\n label_num = label_tensor.shape[0]\n assert np.min(label_tensor) == 0\n one_hot_tensor = np.zeros((label_num, np.max(label_tensor) + 1))\n one_hot_tensor[np.arange(label_num), label_tensor] = 1\n return one_hot_tensor\n\n def statistics(self):\n return self.graph_lists[0].ndata['feat'].shape[1],\\\n self.num_labels,\\\n self.max_num_node\n\n\nclass TUDataset(object):\n \"\"\"\n TUDataset contains lots of graph kernel datasets for graph classification.\n Graphs may have node labels, node attributes, edge labels, and edge attributes,\n varing from different dataset.\n\n :param name: Dataset Name, such as `ENZYMES`, `DD`, `COLLAB`, `MUTAG`, can be the \n datasets name on https://ls11-www.cs.tu-dortmund.de/staff/morris/graphkerneldatasets.\n \"\"\"\n\n _url = r\"https://ls11-www.cs.tu-dortmund.de/people/morris/graphkerneldatasets/{}.zip\"\n\n def __init__(self, name):\n\n self.name = name\n self.extract_dir = self._download()\n\n DS_edge_list = self._idx_from_zero(\n loadtxt(self._file_path(\"A\"), delimiter=\",\").astype(int))\n DS_indicator = self._idx_from_zero(\n loadtxt(self._file_path(\"graph_indicator\"), delimiter=\",\").astype(int))\n DS_graph_labels = self._idx_from_zero(\n loadtxt(self._file_path(\"graph_labels\"), delimiter=\",\").astype(int))\n\n g = DGLGraph()\n g.add_nodes(int(DS_edge_list.max()) + 1)\n g.add_edges(DS_edge_list[:, 0], DS_edge_list[:, 1])\n\n node_idx_list = []\n self.max_num_node = 0\n for idx in range(np.max(DS_indicator) + 1):\n node_idx = np.where(DS_indicator == idx)\n node_idx_list.append(node_idx[0])\n if len(node_idx[0]) > self.max_num_node:\n self.max_num_node = len(node_idx[0])\n\n self.num_labels = max(DS_graph_labels) + 1\n self.graph_labels = DS_graph_labels\n\n self.attr_dict = {\n 'node_labels': ('ndata', 'node_labels'),\n 'node_attributes': ('ndata', 'node_attr'),\n 'edge_labels': ('edata', 'edge_labels'),\n 'edge_attributes': ('edata', 'node_labels'),\n }\n\n for filename, field_name in self.attr_dict.items():\n try:\n data = loadtxt(self._file_path(filename),\n delimiter=',').astype(int)\n if 'label' in filename:\n data = self._idx_from_zero(data)\n getattr(g, field_name[0])[field_name[1]] = data\n except IOError:\n pass\n\n self.graph_lists = g.subgraphs(node_idx_list)\n for g in self.graph_lists:\n g.copy_from_parent()\n\n def __getitem__(self, idx):\n \"\"\"Get the i^th sample.\n Paramters\n ---------\n idx : int\n The sample index.\n Returns\n -------\n (dgl.DGLGraph, int)\n DGLGraph with node feature stored in `feat` field and node label in `node_label` if available.\n And its label.\n \"\"\"\n g = self.graph_lists[idx]\n return g, self.graph_labels[idx]\n\n def __len__(self):\n return len(self.graph_lists)\n\n def _download(self):\n download_dir = get_download_dir()\n zip_file_path = os.path.join(\n download_dir,\n \"tu_{}.zip\".format(\n self.name))\n download(self._url.format(self.name), path=zip_file_path)\n extract_dir = os.path.join(download_dir, \"tu_{}\".format(self.name))\n extract_archive(zip_file_path, 
extract_dir)\n return extract_dir\n\n def _file_path(self, category):\n return os.path.join(self.extract_dir, self.name,\n \"{}_{}.txt\".format(self.name, category))\n\n @staticmethod\n def _idx_from_zero(idx_tensor):\n return idx_tensor - np.min(idx_tensor)\n\n def statistics(self):\n return self.graph_lists[0].ndata['feat'].shape[1], \\\n self.num_labels, \\\n self.max_num_node\n", "path": "python/dgl/data/tu.py"}]} | 3,861 | 143 |
gh_patches_debug_24048 | rasdani/github-patches | git_diff | translate__pootle-5595 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Directory hashes are not expired when directories are added/removed
if you add or remove a directory it should expire hashes on all parents and related - ie /projects/x directories
</issue>
<code>
[start of pootle/apps/pootle_revision/receivers.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from django.db.models.signals import post_save
10 from django.dispatch import receiver
11
12 from pootle.core.delegate import revision_updater
13 from pootle_data.models import StoreData
14 from pootle_store.models import Store
15
16
17 @receiver(post_save, sender=StoreData)
18 def handle_storedata_save(**kwargs):
19 revision_updater.get(Store)(
20 context=kwargs["instance"].store).update(keys=["stats", "checks"])
21
[end of pootle/apps/pootle_revision/receivers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_revision/receivers.py b/pootle/apps/pootle_revision/receivers.py
--- a/pootle/apps/pootle_revision/receivers.py
+++ b/pootle/apps/pootle_revision/receivers.py
@@ -6,10 +6,11 @@
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
-from django.db.models.signals import post_save
+from django.db.models.signals import post_save, pre_delete
from django.dispatch import receiver
from pootle.core.delegate import revision_updater
+from pootle_app.models import Directory
from pootle_data.models import StoreData
from pootle_store.models import Store
@@ -18,3 +19,17 @@
def handle_storedata_save(**kwargs):
revision_updater.get(Store)(
context=kwargs["instance"].store).update(keys=["stats", "checks"])
+
+
+@receiver(post_save, sender=Directory)
+def handle_directory_save(**kwargs):
+ if kwargs.get("created"):
+ return
+ revision_updater.get(Directory)(
+ context=kwargs["instance"]).update(keys=["stats", "checks"])
+
+
+@receiver(pre_delete, sender=Directory)
+def handle_directory_delete(**kwargs):
+ revision_updater.get(Directory)(
+ context=kwargs["instance"].parent).update(keys=["stats", "checks"])
| {"golden_diff": "diff --git a/pootle/apps/pootle_revision/receivers.py b/pootle/apps/pootle_revision/receivers.py\n--- a/pootle/apps/pootle_revision/receivers.py\n+++ b/pootle/apps/pootle_revision/receivers.py\n@@ -6,10 +6,11 @@\n # or later license. See the LICENSE file for a copy of the license and the\n # AUTHORS file for copyright and authorship information.\n \n-from django.db.models.signals import post_save\n+from django.db.models.signals import post_save, pre_delete\n from django.dispatch import receiver\n \n from pootle.core.delegate import revision_updater\n+from pootle_app.models import Directory\n from pootle_data.models import StoreData\n from pootle_store.models import Store\n \n@@ -18,3 +19,17 @@\n def handle_storedata_save(**kwargs):\n revision_updater.get(Store)(\n context=kwargs[\"instance\"].store).update(keys=[\"stats\", \"checks\"])\n+\n+\n+@receiver(post_save, sender=Directory)\n+def handle_directory_save(**kwargs):\n+ if kwargs.get(\"created\"):\n+ return\n+ revision_updater.get(Directory)(\n+ context=kwargs[\"instance\"]).update(keys=[\"stats\", \"checks\"])\n+\n+\n+@receiver(pre_delete, sender=Directory)\n+def handle_directory_delete(**kwargs):\n+ revision_updater.get(Directory)(\n+ context=kwargs[\"instance\"].parent).update(keys=[\"stats\", \"checks\"])\n", "issue": "Directory hashes are not expired when directories are added/removed\nif you add or remove a directory it should expire hashes on all parents and related - ie /projects/x directories\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\nfrom pootle.core.delegate import revision_updater\nfrom pootle_data.models import StoreData\nfrom pootle_store.models import Store\n\n\n@receiver(post_save, sender=StoreData)\ndef handle_storedata_save(**kwargs):\n revision_updater.get(Store)(\n context=kwargs[\"instance\"].store).update(keys=[\"stats\", \"checks\"])\n", "path": "pootle/apps/pootle_revision/receivers.py"}]} | 778 | 319 |
gh_patches_debug_43900 | rasdani/github-patches | git_diff | ansible__awx-8348 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
collection module tower_inventory_source_update should have "name" instead of "inventory_source"
<!-- Issues are for **concrete, actionable bugs and feature requests** only - if you're just asking for debugging help or technical support, please use:
- http://webchat.freenode.net/?channels=ansible-awx
- https://groups.google.com/forum/#!forum/awx-project
We have to limit this because of limited volunteer time to respond to issues! -->
##### ISSUE TYPE
- Feature Idea
##### SUMMARY
<!-- Briefly describe the problem or desired enhancement. -->
A module that works on an entity should reference that entity by `name`. It feels more Ansible-like anyway.
</issue>
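As a rough illustration of the requested rename (plain Python with a hypothetical `resolve_aliases` helper, not the collection's actual code — AnsibleModule normally handles this through `aliases=` in its argument spec), the idea is to accept the old `inventory_source` key while treating `name` as canonical:

```python
def resolve_aliases(params, aliases):
    """Map legacy parameter names onto their canonical names.

    ``aliases`` maps canonical name -> list of legacy names; an explicitly
    supplied canonical key always wins over its aliases.
    """
    resolved = dict(params)
    for canonical, legacy_names in aliases.items():
        for legacy in legacy_names:
            if legacy in resolved and canonical not in resolved:
                resolved[canonical] = resolved.pop(legacy)
    return resolved


# Old task data keeps working while new playbooks can simply say `name:`.
task_args = {"inventory": "My Inventory", "inventory_source": "Example Inventory Source"}
print(resolve_aliases(task_args, {"name": ["inventory_source"]}))
# {'inventory': 'My Inventory', 'name': 'Example Inventory Source'}
```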
<code>
[start of awx_collection/plugins/modules/tower_inventory_source_update.py]
1 #!/usr/bin/python
2 # coding: utf-8 -*-
3
4 # (c) 2020, Bianca Henderson <[email protected]>
5 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
6
7 from __future__ import absolute_import, division, print_function
8 __metaclass__ = type
9
10
11 ANSIBLE_METADATA = {'metadata_version': '1.1',
12 'status': ['preview'],
13 'supported_by': 'community'}
14
15
16 DOCUMENTATION = '''
17 ---
18 module: tower_inventory_source_update
19 author: "Bianca Henderson (@beeankha)"
20 short_description: Update inventory source(s).
21 description:
22 - Update Ansible Tower inventory source(s). See
23 U(https://www.ansible.com/tower) for an overview.
24 options:
25 inventory:
26 description:
27 - Name of the inventory that contains the inventory source(s) to update.
28 required: True
29 type: str
30 inventory_source:
31 description:
32 - The name of the inventory source to update.
33 required: True
34 type: str
35 organization:
36 description:
37 - Name of the inventory source's inventory's organization.
38 type: str
39 wait:
40 description:
41 - Wait for the job to complete.
42 default: False
43 type: bool
44 interval:
45 description:
46 - The interval to request an update from Tower.
47 required: False
48 default: 1
49 type: float
50 timeout:
51 description:
52 - If waiting for the job to complete this will abort after this
53 amount of seconds
54 type: int
55 extends_documentation_fragment: awx.awx.auth
56 '''
57
58 EXAMPLES = '''
59 - name: Update a single inventory source
60 tower_inventory_source_update:
61 inventory: "My Inventory"
62 inventory_source: "Example Inventory Source"
63 organization: Default
64
65 - name: Update all inventory sources
66 tower_inventory_source_update:
67 inventory: "My Other Inventory"
68 inventory_source: "{{ item }}"
69 loop: "{{ query('awx.awx.tower_api', 'inventory_sources', query_params={ 'inventory': 30 }, return_ids=True ) }}"
70 '''
71
72 RETURN = '''
73 id:
74 description: id of the inventory update
75 returned: success
76 type: int
77 sample: 86
78 status:
79 description: status of the inventory update
80 returned: success
81 type: str
82 sample: pending
83 '''
84
85 from ..module_utils.tower_api import TowerAPIModule
86
87
88 def main():
89 # Any additional arguments that are not fields of the item can be added here
90 argument_spec = dict(
91 inventory=dict(required=True),
92 inventory_source=dict(required=True),
93 organization=dict(),
94 wait=dict(default=False, type='bool'),
95 interval=dict(default=1.0, type='float'),
96 timeout=dict(default=None, type='int'),
97 )
98
99 # Create a module for ourselves
100 module = TowerAPIModule(argument_spec=argument_spec)
101
102 # Extract our parameters
103 inventory = module.params.get('inventory')
104 inventory_source = module.params.get('inventory_source')
105 organization = module.params.get('organization')
106 wait = module.params.get('wait')
107 interval = module.params.get('interval')
108 timeout = module.params.get('timeout')
109
110 lookup_data = {}
111 if organization:
112 lookup_data['organization'] = module.resolve_name_to_id('organizations', organization)
113 inventory_object = module.get_one('inventories', name_or_id=inventory, data=lookup_data)
114
115 if not inventory_object:
116 module.fail_json(msg='The specified inventory, {0}, was not found.'.format(lookup_data))
117
118 inventory_source_object = module.get_one('inventory_sources', name_or_id=inventory_source, **{
119 'data': {
120 'inventory': inventory_object['id'],
121 }
122 })
123
124 if not inventory_source_object:
125 module.fail_json(msg='The specified inventory source was not found.')
126
127 # Sync the inventory source(s)
128 inventory_source_update_results = module.post_endpoint(inventory_source_object['related']['update'], **{'data': {}})
129
130 if inventory_source_update_results['status_code'] != 202:
131 module.fail_json(msg="Failed to update inventory source, see response for details", **{'response': inventory_source_update_results})
132
133 module.json_output['changed'] = True
134 module.json_output['id'] = inventory_source_update_results['json']['id']
135 module.json_output['status'] = inventory_source_update_results['json']['status']
136
137 if not wait:
138 module.exit_json(**module.json_output)
139
140 # Invoke wait function
141 module.wait_on_url(
142 url=inventory_source_update_results['json']['url'],
143 object_name=inventory_object,
144 object_type='inventory_update',
145 timeout=timeout, interval=interval
146 )
147
148 module.exit_json(**module.json_output)
149
150
151 if __name__ == '__main__':
152 main()
153
[end of awx_collection/plugins/modules/tower_inventory_source_update.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/awx_collection/plugins/modules/tower_inventory_source_update.py b/awx_collection/plugins/modules/tower_inventory_source_update.py
--- a/awx_collection/plugins/modules/tower_inventory_source_update.py
+++ b/awx_collection/plugins/modules/tower_inventory_source_update.py
@@ -22,14 +22,16 @@
- Update Ansible Tower inventory source(s). See
U(https://www.ansible.com/tower) for an overview.
options:
- inventory:
+ name:
description:
- - Name of the inventory that contains the inventory source(s) to update.
+ - The name or id of the inventory source to update.
required: True
type: str
- inventory_source:
+ aliases:
+ - inventory_source
+ inventory:
description:
- - The name of the inventory source to update.
+ - Name or id of the inventory that contains the inventory source(s) to update.
required: True
type: str
organization:
@@ -58,14 +60,14 @@
EXAMPLES = '''
- name: Update a single inventory source
tower_inventory_source_update:
+ name: "Example Inventory Source"
inventory: "My Inventory"
- inventory_source: "Example Inventory Source"
organization: Default
- name: Update all inventory sources
tower_inventory_source_update:
+ name: "{{ item }}"
inventory: "My Other Inventory"
- inventory_source: "{{ item }}"
loop: "{{ query('awx.awx.tower_api', 'inventory_sources', query_params={ 'inventory': 30 }, return_ids=True ) }}"
'''
@@ -88,8 +90,8 @@
def main():
# Any additional arguments that are not fields of the item can be added here
argument_spec = dict(
+ name=dict(required=True, aliases=['inventory_source']),
inventory=dict(required=True),
- inventory_source=dict(required=True),
organization=dict(),
wait=dict(default=False, type='bool'),
interval=dict(default=1.0, type='float'),
@@ -100,8 +102,8 @@
module = TowerAPIModule(argument_spec=argument_spec)
# Extract our parameters
+ name = module.params.get('name')
inventory = module.params.get('inventory')
- inventory_source = module.params.get('inventory_source')
organization = module.params.get('organization')
wait = module.params.get('wait')
interval = module.params.get('interval')
@@ -115,20 +117,18 @@
if not inventory_object:
module.fail_json(msg='The specified inventory, {0}, was not found.'.format(lookup_data))
- inventory_source_object = module.get_one('inventory_sources', name_or_id=inventory_source, **{
- 'data': {
- 'inventory': inventory_object['id'],
- }
- })
+ inventory_source_object = module.get_one('inventory_sources',
+ name_or_id=name,
+ data={'inventory': inventory_object['id']})
if not inventory_source_object:
module.fail_json(msg='The specified inventory source was not found.')
# Sync the inventory source(s)
- inventory_source_update_results = module.post_endpoint(inventory_source_object['related']['update'], **{'data': {}})
+ inventory_source_update_results = module.post_endpoint(inventory_source_object['related']['update'])
if inventory_source_update_results['status_code'] != 202:
- module.fail_json(msg="Failed to update inventory source, see response for details", **{'response': inventory_source_update_results})
+ module.fail_json(msg="Failed to update inventory source, see response for details", response=inventory_source_update_results)
module.json_output['changed'] = True
module.json_output['id'] = inventory_source_update_results['json']['id']
| {"golden_diff": "diff --git a/awx_collection/plugins/modules/tower_inventory_source_update.py b/awx_collection/plugins/modules/tower_inventory_source_update.py\n--- a/awx_collection/plugins/modules/tower_inventory_source_update.py\n+++ b/awx_collection/plugins/modules/tower_inventory_source_update.py\n@@ -22,14 +22,16 @@\n - Update Ansible Tower inventory source(s). See\n U(https://www.ansible.com/tower) for an overview.\n options:\n- inventory:\n+ name:\n description:\n- - Name of the inventory that contains the inventory source(s) to update.\n+ - The name or id of the inventory source to update.\n required: True\n type: str\n- inventory_source:\n+ aliases:\n+ - inventory_source\n+ inventory:\n description:\n- - The name of the inventory source to update.\n+ - Name or id of the inventory that contains the inventory source(s) to update.\n required: True\n type: str\n organization:\n@@ -58,14 +60,14 @@\n EXAMPLES = '''\n - name: Update a single inventory source\n tower_inventory_source_update:\n+ name: \"Example Inventory Source\"\n inventory: \"My Inventory\"\n- inventory_source: \"Example Inventory Source\"\n organization: Default\n \n - name: Update all inventory sources\n tower_inventory_source_update:\n+ name: \"{{ item }}\"\n inventory: \"My Other Inventory\"\n- inventory_source: \"{{ item }}\"\n loop: \"{{ query('awx.awx.tower_api', 'inventory_sources', query_params={ 'inventory': 30 }, return_ids=True ) }}\"\n '''\n \n@@ -88,8 +90,8 @@\n def main():\n # Any additional arguments that are not fields of the item can be added here\n argument_spec = dict(\n+ name=dict(required=True, aliases=['inventory_source']),\n inventory=dict(required=True),\n- inventory_source=dict(required=True),\n organization=dict(),\n wait=dict(default=False, type='bool'),\n interval=dict(default=1.0, type='float'),\n@@ -100,8 +102,8 @@\n module = TowerAPIModule(argument_spec=argument_spec)\n \n # Extract our parameters\n+ name = module.params.get('name')\n inventory = module.params.get('inventory')\n- inventory_source = module.params.get('inventory_source')\n organization = module.params.get('organization')\n wait = module.params.get('wait')\n interval = module.params.get('interval')\n@@ -115,20 +117,18 @@\n if not inventory_object:\n module.fail_json(msg='The specified inventory, {0}, was not found.'.format(lookup_data))\n \n- inventory_source_object = module.get_one('inventory_sources', name_or_id=inventory_source, **{\n- 'data': {\n- 'inventory': inventory_object['id'],\n- }\n- })\n+ inventory_source_object = module.get_one('inventory_sources',\n+ name_or_id=name,\n+ data={'inventory': inventory_object['id']})\n \n if not inventory_source_object:\n module.fail_json(msg='The specified inventory source was not found.')\n \n # Sync the inventory source(s)\n- inventory_source_update_results = module.post_endpoint(inventory_source_object['related']['update'], **{'data': {}})\n+ inventory_source_update_results = module.post_endpoint(inventory_source_object['related']['update'])\n \n if inventory_source_update_results['status_code'] != 202:\n- module.fail_json(msg=\"Failed to update inventory source, see response for details\", **{'response': inventory_source_update_results})\n+ module.fail_json(msg=\"Failed to update inventory source, see response for details\", response=inventory_source_update_results)\n \n module.json_output['changed'] = True\n module.json_output['id'] = inventory_source_update_results['json']['id']\n", "issue": "collection module tower_inventory_source_update should have \"name\" instead of 
\"inventory_source\"\n<!-- Issues are for **concrete, actionable bugs and feature requests** only - if you're just asking for debugging help or technical support, please use:\r\n\r\n- http://webchat.freenode.net/?channels=ansible-awx\r\n- https://groups.google.com/forum/#!forum/awx-project\r\n\r\nWe have to limit this because of limited volunteer time to respond to issues! -->\r\n\r\n##### ISSUE TYPE\r\n - Feature Idea\r\n\r\n##### SUMMARY\r\n<!-- Briefly describe the problem or desired enhancement. -->\r\nModule that works on an entity should reference that entity by `name`. It feels more ansibly anyway.\n", "before_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n# (c) 2020, Bianca Henderson <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: tower_inventory_source_update\nauthor: \"Bianca Henderson (@beeankha)\"\nshort_description: Update inventory source(s).\ndescription:\n - Update Ansible Tower inventory source(s). See\n U(https://www.ansible.com/tower) for an overview.\noptions:\n inventory:\n description:\n - Name of the inventory that contains the inventory source(s) to update.\n required: True\n type: str\n inventory_source:\n description:\n - The name of the inventory source to update.\n required: True\n type: str\n organization:\n description:\n - Name of the inventory source's inventory's organization.\n type: str\n wait:\n description:\n - Wait for the job to complete.\n default: False\n type: bool\n interval:\n description:\n - The interval to request an update from Tower.\n required: False\n default: 1\n type: float\n timeout:\n description:\n - If waiting for the job to complete this will abort after this\n amount of seconds\n type: int\nextends_documentation_fragment: awx.awx.auth\n'''\n\nEXAMPLES = '''\n- name: Update a single inventory source\n tower_inventory_source_update:\n inventory: \"My Inventory\"\n inventory_source: \"Example Inventory Source\"\n organization: Default\n\n- name: Update all inventory sources\n tower_inventory_source_update:\n inventory: \"My Other Inventory\"\n inventory_source: \"{{ item }}\"\n loop: \"{{ query('awx.awx.tower_api', 'inventory_sources', query_params={ 'inventory': 30 }, return_ids=True ) }}\"\n'''\n\nRETURN = '''\nid:\n description: id of the inventory update\n returned: success\n type: int\n sample: 86\nstatus:\n description: status of the inventory update\n returned: success\n type: str\n sample: pending\n'''\n\nfrom ..module_utils.tower_api import TowerAPIModule\n\n\ndef main():\n # Any additional arguments that are not fields of the item can be added here\n argument_spec = dict(\n inventory=dict(required=True),\n inventory_source=dict(required=True),\n organization=dict(),\n wait=dict(default=False, type='bool'),\n interval=dict(default=1.0, type='float'),\n timeout=dict(default=None, type='int'),\n )\n\n # Create a module for ourselves\n module = TowerAPIModule(argument_spec=argument_spec)\n\n # Extract our parameters\n inventory = module.params.get('inventory')\n inventory_source = module.params.get('inventory_source')\n organization = module.params.get('organization')\n wait = module.params.get('wait')\n interval = module.params.get('interval')\n timeout = module.params.get('timeout')\n\n lookup_data = {}\n if 
organization:\n lookup_data['organization'] = module.resolve_name_to_id('organizations', organization)\n inventory_object = module.get_one('inventories', name_or_id=inventory, data=lookup_data)\n\n if not inventory_object:\n module.fail_json(msg='The specified inventory, {0}, was not found.'.format(lookup_data))\n\n inventory_source_object = module.get_one('inventory_sources', name_or_id=inventory_source, **{\n 'data': {\n 'inventory': inventory_object['id'],\n }\n })\n\n if not inventory_source_object:\n module.fail_json(msg='The specified inventory source was not found.')\n\n # Sync the inventory source(s)\n inventory_source_update_results = module.post_endpoint(inventory_source_object['related']['update'], **{'data': {}})\n\n if inventory_source_update_results['status_code'] != 202:\n module.fail_json(msg=\"Failed to update inventory source, see response for details\", **{'response': inventory_source_update_results})\n\n module.json_output['changed'] = True\n module.json_output['id'] = inventory_source_update_results['json']['id']\n module.json_output['status'] = inventory_source_update_results['json']['status']\n\n if not wait:\n module.exit_json(**module.json_output)\n\n # Invoke wait function\n module.wait_on_url(\n url=inventory_source_update_results['json']['url'],\n object_name=inventory_object,\n object_type='inventory_update',\n timeout=timeout, interval=interval\n )\n\n module.exit_json(**module.json_output)\n\n\nif __name__ == '__main__':\n main()\n", "path": "awx_collection/plugins/modules/tower_inventory_source_update.py"}]} | 2,094 | 834 |
gh_patches_debug_6433 | rasdani/github-patches | git_diff | scikit-image__scikit-image-5971 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug: imsave cannot detect right extension of pathlib
imsave cannot detect the right extension when given a pathlib.Path:
```python
from pathlib import Path
from skimage import io
filename = Path("out.jpg")
io.imsave(filename, im) # this saves im as a png file though the extension is jpg
io.imsave("out.jpg", im) #this is working correctly
io.imsave(str(filename), im) #this is working correctly
```
</issue>
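A minimal standalone sketch of the likely root cause (assumed behaviour, not scikit-image internals): `pathlib.Path` lacks the string methods a lowercase-suffix check relies on, so normalising to a plain string with `os.fspath` (or `str`) first is what makes the extension visible again:

```python
import os
from pathlib import Path

fname = Path("out.jpg")

# Path objects have no .lower(), so a string-based extension check guarded
# by `hasattr(fname, 'lower')` silently skips them.
print(hasattr(fname, "lower"))                          # False

# Converting to a plain string first restores the usual suffix check.
normalized = os.fspath(fname)                           # 'out.jpg'
print(normalized.lower().endswith((".jpg", ".jpeg")))   # True
```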
<code>
[start of skimage/io/_io.py]
1 import pathlib
2
3 import numpy as np
4
5 from .._shared.utils import warn
6 from ..exposure import is_low_contrast
7 from ..color.colorconv import rgb2gray, rgba2rgb
8 from ..io.manage_plugins import call_plugin
9 from .util import file_or_url_context
10
11
12 __all__ = ['imread', 'imsave', 'imshow', 'show',
13 'imread_collection', 'imshow_collection']
14
15
16 def imread(fname, as_gray=False, plugin=None, **plugin_args):
17 """Load an image from file.
18
19 Parameters
20 ----------
21 fname : string
22 Image file name, e.g. ``test.jpg`` or URL.
23 as_gray : bool, optional
24 If True, convert color images to gray-scale (64-bit floats).
25 Images that are already in gray-scale format are not converted.
26 plugin : str, optional
27 Name of plugin to use. By default, the different plugins are
28 tried (starting with imageio) until a suitable
29 candidate is found. If not given and fname is a tiff file, the
30 tifffile plugin will be used.
31
32 Other Parameters
33 ----------------
34 plugin_args : keywords
35 Passed to the given plugin.
36
37 Returns
38 -------
39 img_array : ndarray
40 The different color bands/channels are stored in the
41 third dimension, such that a gray-image is MxN, an
42 RGB-image MxNx3 and an RGBA-image MxNx4.
43
44 """
45 if isinstance(fname, pathlib.Path):
46 fname = str(fname.resolve())
47
48 if plugin is None and hasattr(fname, 'lower'):
49 if fname.lower().endswith(('.tiff', '.tif')):
50 plugin = 'tifffile'
51
52 with file_or_url_context(fname) as fname:
53 img = call_plugin('imread', fname, plugin=plugin, **plugin_args)
54
55 if not hasattr(img, 'ndim'):
56 return img
57
58 if img.ndim > 2:
59 if img.shape[-1] not in (3, 4) and img.shape[-3] in (3, 4):
60 img = np.swapaxes(img, -1, -3)
61 img = np.swapaxes(img, -2, -3)
62
63 if as_gray:
64 if img.shape[2] == 4:
65 img = rgba2rgb(img)
66 img = rgb2gray(img)
67
68 return img
69
70
71 def imread_collection(load_pattern, conserve_memory=True,
72 plugin=None, **plugin_args):
73 """
74 Load a collection of images.
75
76 Parameters
77 ----------
78 load_pattern : str or list
79 List of objects to load. These are usually filenames, but may
80 vary depending on the currently active plugin. See the docstring
81 for ``ImageCollection`` for the default behaviour of this parameter.
82 conserve_memory : bool, optional
83 If True, never keep more than one in memory at a specific
84 time. Otherwise, images will be cached once they are loaded.
85
86 Returns
87 -------
88 ic : ImageCollection
89 Collection of images.
90
91 Other Parameters
92 ----------------
93 plugin_args : keywords
94 Passed to the given plugin.
95
96 """
97 return call_plugin('imread_collection', load_pattern, conserve_memory,
98 plugin=plugin, **plugin_args)
99
100
101 def imsave(fname, arr, plugin=None, check_contrast=True, **plugin_args):
102 """Save an image to file.
103
104 Parameters
105 ----------
106 fname : str
107 Target filename.
108 arr : ndarray of shape (M,N) or (M,N,3) or (M,N,4)
109 Image data.
110 plugin : str, optional
111 Name of plugin to use. By default, the different plugins are
112 tried (starting with imageio) until a suitable
113 candidate is found. If not given and fname is a tiff file, the
114 tifffile plugin will be used.
115 check_contrast : bool, optional
116 Check for low contrast and print warning (default: True).
117
118 Other Parameters
119 ----------------
120 plugin_args : keywords
121 Passed to the given plugin.
122
123 Notes
124 -----
125 When saving a JPEG, the compression ratio may be controlled using the
126 ``quality`` keyword argument which is an integer with values in [1, 100]
127 where 1 is worst quality and smallest file size, and 100 is best quality
128 and largest file size (default 75). This is only available when using
129 the PIL and imageio plugins.
130 """
131 if plugin is None and hasattr(fname, 'lower'):
132 if fname.lower().endswith(('.tiff', '.tif')):
133 plugin = 'tifffile'
134 if arr.dtype == bool:
135 warn('%s is a boolean image: setting True to 255 and False to 0. '
136 'To silence this warning, please convert the image using '
137 'img_as_ubyte.' % fname, stacklevel=2)
138 arr = arr.astype('uint8') * 255
139 if check_contrast and is_low_contrast(arr):
140 warn('%s is a low contrast image' % fname)
141 return call_plugin('imsave', fname, arr, plugin=plugin, **plugin_args)
142
143
144 def imshow(arr, plugin=None, **plugin_args):
145 """Display an image.
146
147 Parameters
148 ----------
149 arr : ndarray or str
150 Image data or name of image file.
151 plugin : str
152 Name of plugin to use. By default, the different plugins are
153 tried (starting with imageio) until a suitable
154 candidate is found.
155
156 Other Parameters
157 ----------------
158 plugin_args : keywords
159 Passed to the given plugin.
160
161 """
162 if isinstance(arr, str):
163 arr = call_plugin('imread', arr, plugin=plugin)
164 return call_plugin('imshow', arr, plugin=plugin, **plugin_args)
165
166
167 def imshow_collection(ic, plugin=None, **plugin_args):
168 """Display a collection of images.
169
170 Parameters
171 ----------
172 ic : ImageCollection
173 Collection to display.
174 plugin : str
175 Name of plugin to use. By default, the different plugins are
176 tried until a suitable candidate is found.
177
178 Other Parameters
179 ----------------
180 plugin_args : keywords
181 Passed to the given plugin.
182
183 """
184 return call_plugin('imshow_collection', ic, plugin=plugin, **plugin_args)
185
186
187 def show():
188 '''Display pending images.
189
190 Launch the event loop of the current gui plugin, and display all
191 pending images, queued via `imshow`. This is required when using
192 `imshow` from non-interactive scripts.
193
194 A call to `show` will block execution of code until all windows
195 have been closed.
196
197 Examples
198 --------
199 >>> import skimage.io as io
200
201 >>> rng = np.random.default_rng()
202 >>> for i in range(4):
203 ... ax_im = io.imshow(rng.random((50, 50)))
204 >>> io.show() # doctest: +SKIP
205
206 '''
207 return call_plugin('_app_show')
208
[end of skimage/io/_io.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/skimage/io/_io.py b/skimage/io/_io.py
--- a/skimage/io/_io.py
+++ b/skimage/io/_io.py
@@ -128,6 +128,8 @@
and largest file size (default 75). This is only available when using
the PIL and imageio plugins.
"""
+ if isinstance(fname, pathlib.Path):
+ fname = str(fname.resolve())
if plugin is None and hasattr(fname, 'lower'):
if fname.lower().endswith(('.tiff', '.tif')):
plugin = 'tifffile'
| {"golden_diff": "diff --git a/skimage/io/_io.py b/skimage/io/_io.py\n--- a/skimage/io/_io.py\n+++ b/skimage/io/_io.py\n@@ -128,6 +128,8 @@\n and largest file size (default 75). This is only available when using\n the PIL and imageio plugins.\n \"\"\"\n+ if isinstance(fname, pathlib.Path):\n+ fname = str(fname.resolve())\n if plugin is None and hasattr(fname, 'lower'):\n if fname.lower().endswith(('.tiff', '.tif')):\n plugin = 'tifffile'\n", "issue": "bug: imsave cannot detect right extension of pathlib \nimsave cannot detect right extension of pathlib \r\n\r\n```python\r\nfrom pathlib import Path\r\nfrom skimage import io\r\n\r\nfilename = Path(\"out.jpg\")\r\nio.imsave(filename, im) # this saves im as a png file though the extension is jpg\r\n\r\nio.imsave(\"out.jpg\", im) #this is working correctly\r\nio.imsave(str(filename), im) #this is working correctly\r\n```\n", "before_files": [{"content": "import pathlib\n\nimport numpy as np\n\nfrom .._shared.utils import warn\nfrom ..exposure import is_low_contrast\nfrom ..color.colorconv import rgb2gray, rgba2rgb\nfrom ..io.manage_plugins import call_plugin\nfrom .util import file_or_url_context\n\n\n__all__ = ['imread', 'imsave', 'imshow', 'show',\n 'imread_collection', 'imshow_collection']\n\n\ndef imread(fname, as_gray=False, plugin=None, **plugin_args):\n \"\"\"Load an image from file.\n\n Parameters\n ----------\n fname : string\n Image file name, e.g. ``test.jpg`` or URL.\n as_gray : bool, optional\n If True, convert color images to gray-scale (64-bit floats).\n Images that are already in gray-scale format are not converted.\n plugin : str, optional\n Name of plugin to use. By default, the different plugins are\n tried (starting with imageio) until a suitable\n candidate is found. If not given and fname is a tiff file, the\n tifffile plugin will be used.\n\n Other Parameters\n ----------------\n plugin_args : keywords\n Passed to the given plugin.\n\n Returns\n -------\n img_array : ndarray\n The different color bands/channels are stored in the\n third dimension, such that a gray-image is MxN, an\n RGB-image MxNx3 and an RGBA-image MxNx4.\n\n \"\"\"\n if isinstance(fname, pathlib.Path):\n fname = str(fname.resolve())\n\n if plugin is None and hasattr(fname, 'lower'):\n if fname.lower().endswith(('.tiff', '.tif')):\n plugin = 'tifffile'\n\n with file_or_url_context(fname) as fname:\n img = call_plugin('imread', fname, plugin=plugin, **plugin_args)\n\n if not hasattr(img, 'ndim'):\n return img\n\n if img.ndim > 2:\n if img.shape[-1] not in (3, 4) and img.shape[-3] in (3, 4):\n img = np.swapaxes(img, -1, -3)\n img = np.swapaxes(img, -2, -3)\n\n if as_gray:\n if img.shape[2] == 4:\n img = rgba2rgb(img)\n img = rgb2gray(img)\n\n return img\n\n\ndef imread_collection(load_pattern, conserve_memory=True,\n plugin=None, **plugin_args):\n \"\"\"\n Load a collection of images.\n\n Parameters\n ----------\n load_pattern : str or list\n List of objects to load. These are usually filenames, but may\n vary depending on the currently active plugin. See the docstring\n for ``ImageCollection`` for the default behaviour of this parameter.\n conserve_memory : bool, optional\n If True, never keep more than one in memory at a specific\n time. 
Otherwise, images will be cached once they are loaded.\n\n Returns\n -------\n ic : ImageCollection\n Collection of images.\n\n Other Parameters\n ----------------\n plugin_args : keywords\n Passed to the given plugin.\n\n \"\"\"\n return call_plugin('imread_collection', load_pattern, conserve_memory,\n plugin=plugin, **plugin_args)\n\n\ndef imsave(fname, arr, plugin=None, check_contrast=True, **plugin_args):\n \"\"\"Save an image to file.\n\n Parameters\n ----------\n fname : str\n Target filename.\n arr : ndarray of shape (M,N) or (M,N,3) or (M,N,4)\n Image data.\n plugin : str, optional\n Name of plugin to use. By default, the different plugins are\n tried (starting with imageio) until a suitable\n candidate is found. If not given and fname is a tiff file, the\n tifffile plugin will be used.\n check_contrast : bool, optional\n Check for low contrast and print warning (default: True).\n\n Other Parameters\n ----------------\n plugin_args : keywords\n Passed to the given plugin.\n\n Notes\n -----\n When saving a JPEG, the compression ratio may be controlled using the\n ``quality`` keyword argument which is an integer with values in [1, 100]\n where 1 is worst quality and smallest file size, and 100 is best quality\n and largest file size (default 75). This is only available when using\n the PIL and imageio plugins.\n \"\"\"\n if plugin is None and hasattr(fname, 'lower'):\n if fname.lower().endswith(('.tiff', '.tif')):\n plugin = 'tifffile'\n if arr.dtype == bool:\n warn('%s is a boolean image: setting True to 255 and False to 0. '\n 'To silence this warning, please convert the image using '\n 'img_as_ubyte.' % fname, stacklevel=2)\n arr = arr.astype('uint8') * 255\n if check_contrast and is_low_contrast(arr):\n warn('%s is a low contrast image' % fname)\n return call_plugin('imsave', fname, arr, plugin=plugin, **plugin_args)\n\n\ndef imshow(arr, plugin=None, **plugin_args):\n \"\"\"Display an image.\n\n Parameters\n ----------\n arr : ndarray or str\n Image data or name of image file.\n plugin : str\n Name of plugin to use. By default, the different plugins are\n tried (starting with imageio) until a suitable\n candidate is found.\n\n Other Parameters\n ----------------\n plugin_args : keywords\n Passed to the given plugin.\n\n \"\"\"\n if isinstance(arr, str):\n arr = call_plugin('imread', arr, plugin=plugin)\n return call_plugin('imshow', arr, plugin=plugin, **plugin_args)\n\n\ndef imshow_collection(ic, plugin=None, **plugin_args):\n \"\"\"Display a collection of images.\n\n Parameters\n ----------\n ic : ImageCollection\n Collection to display.\n plugin : str\n Name of plugin to use. By default, the different plugins are\n tried until a suitable candidate is found.\n\n Other Parameters\n ----------------\n plugin_args : keywords\n Passed to the given plugin.\n\n \"\"\"\n return call_plugin('imshow_collection', ic, plugin=plugin, **plugin_args)\n\n\ndef show():\n '''Display pending images.\n\n Launch the event loop of the current gui plugin, and display all\n pending images, queued via `imshow`. This is required when using\n `imshow` from non-interactive scripts.\n\n A call to `show` will block execution of code until all windows\n have been closed.\n\n Examples\n --------\n >>> import skimage.io as io\n\n >>> rng = np.random.default_rng()\n >>> for i in range(4):\n ... ax_im = io.imshow(rng.random((50, 50)))\n >>> io.show() # doctest: +SKIP\n\n '''\n return call_plugin('_app_show')\n", "path": "skimage/io/_io.py"}]} | 2,684 | 135 |
gh_patches_debug_30833 | rasdani/github-patches | git_diff | numpy__numpy-13083 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Failed to compile scipy with Anaconda gfortran.
<!-- Please describe the issue in detail here, and fill in the fields below -->
The compilation fails with an error and prints a command it is trying to execute.
If I type the command at the end of the error log manually, it works.
I think this points to a quoting error. The additional Fortran flags from the environment should be added to the command-line list after a shell-style string split. Adding the full list of Fortran flags as a single string produces exactly this problem.
### Reproducing code example:
clone scipy from github;
activate the Anaconda build environment.
python runtests.py
### Error message:
```
f951: Error: unrecognized command line option '-fopenmp -march=nocona -mtune=haswell -ftree-vectorize -fPIC -fstack-protector-strong -fno-plt -O2 -ffunction-sections -pipe'
Running from scipy source directory.
/home/yfeng1/anaconda3/install/lib/python3.6/site-packages/numpy/distutils/system_info.py:730: UserWarning: Specified path /usr/local/include/python3.6m is invalid.
return self.get_paths(self.section, key)
/home/yfeng1/anaconda3/install/lib/python3.6/site-packages/numpy/distutils/system_info.py:730: UserWarning: Specified path /usr/include/suitesparse/python3.6m is invalid.
return self.get_paths(self.section, key)
error: Command "/home/yfeng1/anaconda3/install/bin/x86_64-conda_cos6-linux-gnu-gfortran -Wall -g -ffixed-form -fno-second-underscore -fPIC -fopenmp -march=nocona -mtune=haswell -ftree-vectorize -fPIC -fstack-protector-strong -fno-plt -O2 -ffunction-sections -pipe -O3 -funroll-loops -I/home/yfeng1/anaconda3/install/lib/python3.6/site-packages/numpy/core/include -c -c scipy/fftpack/src/dfftpack/dcosqb.f -o build/temp.linux-x86_64-3.6/scipy/fftpack/src/dfftpack/dcosqb.o" failed with exit status 1
```
### Numpy/Python version information:
1.16.2.
The environment variables regarding fortran are
```
FORTRANFLAGS=-fopenmp -march=nocona -mtune=haswell -ftree-vectorize -fPIC -fstack-protector-strong -fno-plt -O2 -ffunction-sections -pipe
GFORTRAN=/home/yfeng1/anaconda3/install/bin/x86_64-conda_cos6-linux-gnu-gfortran
DEBUG_FORTRANFLAGS=-fopenmp -march=nocona -mtune=haswell -ftree-vectorize -fPIC -fstack-protector-strong -fno-plt -O2 -ffunction-sections -pipe -fopenmp -march=nocona -mtune=haswell -ftree-vectorize -fPIC -fstack-protector-all -fno-plt -Og -g -Wall -Wextra -fcheck=all -fbacktrace -fimplicit-none -fvar-tracking-assignments -ffunction-sections -pipe
```
</issue>
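A small self-contained illustration of the quoting problem described above (not numpy.distutils code): appending the environment variable's value as one string yields a single malformed argv entry, while a shell-style split with `shlex.split` yields individual flags:

```python
import shlex

envvar_contents = ("-fopenmp -march=nocona -mtune=haswell -ftree-vectorize "
                   "-fPIC -fstack-protector-strong -fno-plt -O2 "
                   "-ffunction-sections -pipe")
existing_flags = ["-Wall", "-g", "-ffixed-form"]

# Appended as-is, the whole value becomes ONE argv entry, which gfortran
# then reports as a single unrecognized option.
broken = existing_flags + [envvar_contents]
print(len(broken))      # 4

# Split shell-style, every flag becomes its own argv entry.
fixed = existing_flags + shlex.split(envvar_contents)
print(len(fixed))       # 13
print(fixed[-3:])       # ['-O2', '-ffunction-sections', '-pipe']
```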
<code>
[start of numpy/distutils/fcompiler/environment.py]
1 from __future__ import division, absolute_import, print_function
2
3 import os
4 import warnings
5 from distutils.dist import Distribution
6
7 __metaclass__ = type
8
9 class EnvironmentConfig(object):
10 def __init__(self, distutils_section='ALL', **kw):
11 self._distutils_section = distutils_section
12 self._conf_keys = kw
13 self._conf = None
14 self._hook_handler = None
15
16 def dump_variable(self, name):
17 conf_desc = self._conf_keys[name]
18 hook, envvar, confvar, convert, append = conf_desc
19 if not convert:
20 convert = lambda x : x
21 print('%s.%s:' % (self._distutils_section, name))
22 v = self._hook_handler(name, hook)
23 print(' hook : %s' % (convert(v),))
24 if envvar:
25 v = os.environ.get(envvar, None)
26 print(' environ: %s' % (convert(v),))
27 if confvar and self._conf:
28 v = self._conf.get(confvar, (None, None))[1]
29 print(' config : %s' % (convert(v),))
30
31 def dump_variables(self):
32 for name in self._conf_keys:
33 self.dump_variable(name)
34
35 def __getattr__(self, name):
36 try:
37 conf_desc = self._conf_keys[name]
38 except KeyError:
39 raise AttributeError(name)
40 return self._get_var(name, conf_desc)
41
42 def get(self, name, default=None):
43 try:
44 conf_desc = self._conf_keys[name]
45 except KeyError:
46 return default
47 var = self._get_var(name, conf_desc)
48 if var is None:
49 var = default
50 return var
51
52 def _get_var(self, name, conf_desc):
53 hook, envvar, confvar, convert, append = conf_desc
54 var = self._hook_handler(name, hook)
55 if envvar is not None:
56 envvar_contents = os.environ.get(envvar)
57 if envvar_contents is not None:
58 if var and append:
59 if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '0') == '1':
60 var = var + [envvar_contents]
61 else:
62 var = envvar_contents
63 if 'NPY_DISTUTILS_APPEND_FLAGS' not in os.environ.keys():
64 msg = "{} is used as is, not appended ".format(envvar) + \
65 "to flags already defined " + \
66 "by numpy.distutils! Use NPY_DISTUTILS_APPEND_FLAGS=1 " + \
67 "to obtain appending behavior instead (this " + \
68 "behavior will become default in a future release)."
69 warnings.warn(msg, UserWarning, stacklevel=3)
70 else:
71 var = envvar_contents
72 if confvar is not None and self._conf:
73 var = self._conf.get(confvar, (None, var))[1]
74 if convert is not None:
75 var = convert(var)
76 return var
77
78 def clone(self, hook_handler):
79 ec = self.__class__(distutils_section=self._distutils_section,
80 **self._conf_keys)
81 ec._hook_handler = hook_handler
82 return ec
83
84 def use_distribution(self, dist):
85 if isinstance(dist, Distribution):
86 self._conf = dist.get_option_dict(self._distutils_section)
87 else:
88 self._conf = dist
89
[end of numpy/distutils/fcompiler/environment.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/numpy/distutils/fcompiler/environment.py b/numpy/distutils/fcompiler/environment.py
--- a/numpy/distutils/fcompiler/environment.py
+++ b/numpy/distutils/fcompiler/environment.py
@@ -51,13 +51,16 @@
def _get_var(self, name, conf_desc):
hook, envvar, confvar, convert, append = conf_desc
+ if convert is None:
+ convert = lambda x: x
var = self._hook_handler(name, hook)
if envvar is not None:
envvar_contents = os.environ.get(envvar)
if envvar_contents is not None:
+ envvar_contents = convert(envvar_contents)
if var and append:
if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '0') == '1':
- var = var + [envvar_contents]
+ var.extend(envvar_contents)
else:
var = envvar_contents
if 'NPY_DISTUTILS_APPEND_FLAGS' not in os.environ.keys():
@@ -70,11 +73,12 @@
else:
var = envvar_contents
if confvar is not None and self._conf:
- var = self._conf.get(confvar, (None, var))[1]
- if convert is not None:
- var = convert(var)
+ if confvar in self._conf:
+ source, confvar_contents = self._conf[confvar]
+ var = convert(confvar_contents)
return var
+
def clone(self, hook_handler):
ec = self.__class__(distutils_section=self._distutils_section,
**self._conf_keys)
| {"golden_diff": "diff --git a/numpy/distutils/fcompiler/environment.py b/numpy/distutils/fcompiler/environment.py\n--- a/numpy/distutils/fcompiler/environment.py\n+++ b/numpy/distutils/fcompiler/environment.py\n@@ -51,13 +51,16 @@\n \n def _get_var(self, name, conf_desc):\n hook, envvar, confvar, convert, append = conf_desc\n+ if convert is None:\n+ convert = lambda x: x\n var = self._hook_handler(name, hook)\n if envvar is not None:\n envvar_contents = os.environ.get(envvar)\n if envvar_contents is not None:\n+ envvar_contents = convert(envvar_contents)\n if var and append:\n if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '0') == '1':\n- var = var + [envvar_contents]\n+ var.extend(envvar_contents)\n else:\n var = envvar_contents\n if 'NPY_DISTUTILS_APPEND_FLAGS' not in os.environ.keys():\n@@ -70,11 +73,12 @@\n else:\n var = envvar_contents\n if confvar is not None and self._conf:\n- var = self._conf.get(confvar, (None, var))[1]\n- if convert is not None:\n- var = convert(var)\n+ if confvar in self._conf:\n+ source, confvar_contents = self._conf[confvar]\n+ var = convert(confvar_contents)\n return var\n \n+\n def clone(self, hook_handler):\n ec = self.__class__(distutils_section=self._distutils_section,\n **self._conf_keys)\n", "issue": "Failed to compile scipy with Anaconda gfortran.\n<!-- Please describe the issue in detail here, and fill in the fields below -->\r\n\r\nThe compilation fails with an error and prints a command it is trying to execute. \r\n\r\nIf I type the command at the end of the error log manually, it works.\r\n\r\nI think this points to a quotation error. The additional fortran flags from the environment shall be added to the command line list after shell-style string split. It will produce this exact problem if the full list of fortran flags are added as a single string.\r\n\r\n### Reproducing code example:\r\n\r\nclone scipy from github;\r\n\r\nactivate Anaconda build enviroment.\r\npython runtests.py\r\n\r\n### Error message:\r\n\r\n```\r\nf951: Error: unrecognized command line option '-fopenmp -march=nocona -mtune=haswell -ftree-vectorize -fPIC -fstack-protector-strong -fno-plt -O2 -ffunction-sections -pipe'\r\nRunning from scipy source directory.\r\n/home/yfeng1/anaconda3/install/lib/python3.6/site-packages/numpy/distutils/system_info.py:730: UserWarning: Specified path /usr/local/include/python3.6m is invalid.\r\n return self.get_paths(self.section, key)\r\n/home/yfeng1/anaconda3/install/lib/python3.6/site-packages/numpy/distutils/system_info.py:730: UserWarning: Specified path /usr/include/suitesparse/python3.6m is invalid.\r\n return self.get_paths(self.section, key)\r\nerror: Command \"/home/yfeng1/anaconda3/install/bin/x86_64-conda_cos6-linux-gnu-gfortran -Wall -g -ffixed-form -fno-second-underscore -fPIC -fopenmp -march=nocona -mtune=haswell -ftree-vectorize -fPIC -fstack-protector-strong -fno-plt -O2 -ffunction-sections -pipe -O3 -funroll-loops -I/home/yfeng1/anaconda3/install/lib/python3.6/site-packages/numpy/core/include -c -c scipy/fftpack/src/dfftpack/dcosqb.f -o build/temp.linux-x86_64-3.6/scipy/fftpack/src/dfftpack/dcosqb.o\" failed with exit status 1\r\n```\r\n\r\n### Numpy/Python version information:\r\n\r\n1.16.2.\r\n\r\nThe environment variables regarding fortran are\r\n```\r\nFORTRANFLAGS=-fopenmp -march=nocona -mtune=haswell -ftree-vectorize -fPIC -fstack-protector-strong -fno-plt -O2 -ffunction-sections -pipe\r\nGFORTRAN=/home/yfeng1/anaconda3/install/bin/x86_64-conda_cos6-linux-gnu-gfortran\r\nDEBUG_FORTRANFLAGS=-fopenmp -march=nocona 
-mtune=haswell -ftree-vectorize -fPIC -fstack-protector-strong -fno-plt -O2 -ffunction-sections -pipe -fopenmp -march=nocona -mtune=haswell -ftree-vectorize -fPIC -fstack-protector-all -fno-plt -Og -g -Wall -Wextra -fcheck=all -fbacktrace -fimplicit-none -fvar-tracking-assignments -ffunction-sections -pipe\r\n```\n", "before_files": [{"content": "from __future__ import division, absolute_import, print_function\n\nimport os\nimport warnings\nfrom distutils.dist import Distribution\n\n__metaclass__ = type\n\nclass EnvironmentConfig(object):\n def __init__(self, distutils_section='ALL', **kw):\n self._distutils_section = distutils_section\n self._conf_keys = kw\n self._conf = None\n self._hook_handler = None\n\n def dump_variable(self, name):\n conf_desc = self._conf_keys[name]\n hook, envvar, confvar, convert, append = conf_desc\n if not convert:\n convert = lambda x : x\n print('%s.%s:' % (self._distutils_section, name))\n v = self._hook_handler(name, hook)\n print(' hook : %s' % (convert(v),))\n if envvar:\n v = os.environ.get(envvar, None)\n print(' environ: %s' % (convert(v),))\n if confvar and self._conf:\n v = self._conf.get(confvar, (None, None))[1]\n print(' config : %s' % (convert(v),))\n\n def dump_variables(self):\n for name in self._conf_keys:\n self.dump_variable(name)\n\n def __getattr__(self, name):\n try:\n conf_desc = self._conf_keys[name]\n except KeyError:\n raise AttributeError(name)\n return self._get_var(name, conf_desc)\n\n def get(self, name, default=None):\n try:\n conf_desc = self._conf_keys[name]\n except KeyError:\n return default\n var = self._get_var(name, conf_desc)\n if var is None:\n var = default\n return var\n\n def _get_var(self, name, conf_desc):\n hook, envvar, confvar, convert, append = conf_desc\n var = self._hook_handler(name, hook)\n if envvar is not None:\n envvar_contents = os.environ.get(envvar)\n if envvar_contents is not None:\n if var and append:\n if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '0') == '1':\n var = var + [envvar_contents]\n else:\n var = envvar_contents\n if 'NPY_DISTUTILS_APPEND_FLAGS' not in os.environ.keys():\n msg = \"{} is used as is, not appended \".format(envvar) + \\\n \"to flags already defined \" + \\\n \"by numpy.distutils! Use NPY_DISTUTILS_APPEND_FLAGS=1 \" + \\\n \"to obtain appending behavior instead (this \" + \\\n \"behavior will become default in a future release).\"\n warnings.warn(msg, UserWarning, stacklevel=3)\n else:\n var = envvar_contents\n if confvar is not None and self._conf:\n var = self._conf.get(confvar, (None, var))[1]\n if convert is not None:\n var = convert(var)\n return var\n\n def clone(self, hook_handler):\n ec = self.__class__(distutils_section=self._distutils_section,\n **self._conf_keys)\n ec._hook_handler = hook_handler\n return ec\n\n def use_distribution(self, dist):\n if isinstance(dist, Distribution):\n self._conf = dist.get_option_dict(self._distutils_section)\n else:\n self._conf = dist\n", "path": "numpy/distutils/fcompiler/environment.py"}]} | 2,177 | 365 |
gh_patches_debug_24492 | rasdani/github-patches | git_diff | scikit-hep__pyhf-186 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Updated setup.py for pytorch > 0.4 dependency
# Description
I had 0.3.1 for Torch, and that caused issues with some of the doctesting, as the distributions did not have `cdf` methods. I forced an upgrade of pytorch and things are fine now.
</issue>
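A hedged sketch (not pyhf's actual code) of failing fast on the situation described: per the report above, the torch distributions only gained `.cdf` in the 0.4 line, so a version guard gives a clearer message than an attribute error during doctests:

```python
# minimal sketch; assumes torch is importable
import torch
from distutils.version import LooseVersion

if LooseVersion(torch.__version__) < LooseVersion("0.4.0"):
    raise ImportError(
        "the torch backend needs torch>=0.4.0 (its distributions gained "
        ".cdf there, per the report above); found %s" % torch.__version__
    )
```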
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2 setup(
3 name = 'pyhf',
4 version = '0.0.8',
5 description = '(partial) pure python histfactory implementation',
6 url = '',
7 author = 'Lukas Heinrich',
8 author_email = '[email protected]',
9 packages = find_packages(),
10 include_package_data = True,
11 install_requires = [
12 'numpy>=1.14.3',
13 'scipy'
14 ],
15 extras_require = {
16 'xmlimport': [
17 'uproot',
18 ],
19 'torch': [
20 'torch'
21 ],
22 'mxnet':[
23 'mxnet',
24 ],
25 'develop': [
26 'pyflakes',
27 'pytest>=3.5.1',
28 'pytest-cov>=2.5.1',
29 'pytest-benchmark[histogram]',
30 'python-coveralls',
31 'matplotlib',
32 'jupyter',
33 'uproot',
34 'papermill',
35 'torch',
36 'tensorflow',
37 'mxnet>=1.0.0',
38 'graphviz',
39 'sphinx',
40 'sphinxcontrib-bibtex',
41 'sphinxcontrib-napoleon',
42 'sphinx_rtd_theme',
43 'nbsphinx',
44 'jsonpatch',
45 'jsonschema>=2.6.0'
46 ]
47 },
48 entry_points = {
49 },
50 dependency_links = [
51 ]
52 )
53
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@
packages = find_packages(),
include_package_data = True,
install_requires = [
- 'numpy>=1.14.3',
+ 'numpy<=1.14.5,>=1.14.3', # required by tensorflow, mxnet, and us
'scipy'
],
extras_require = {
@@ -17,10 +17,18 @@
'uproot',
],
'torch': [
- 'torch'
+ 'torch>=0.4.0'
],
'mxnet':[
- 'mxnet',
+ 'mxnet>=1.0.0',
+ 'requests<2.19.0,>=2.18.4',
+ 'numpy<1.15.0,>=1.8.2',
+ 'requests<2.19.0,>=2.18.4',
+ ],
+ 'tensorflow':[
+ 'tensorflow==1.10.0',
+ 'numpy<=1.14.5,>=1.13.3',
+ 'setuptools<=39.1.0',
],
'develop': [
'pyflakes',
@@ -28,13 +36,11 @@
'pytest-cov>=2.5.1',
'pytest-benchmark[histogram]',
'python-coveralls',
+ 'coverage==4.0.3', # coveralls
'matplotlib',
'jupyter',
'uproot',
'papermill',
- 'torch',
- 'tensorflow',
- 'mxnet>=1.0.0',
'graphviz',
'sphinx',
'sphinxcontrib-bibtex',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -9,7 +9,7 @@\n packages = find_packages(),\n include_package_data = True,\n install_requires = [\n- 'numpy>=1.14.3',\n+ 'numpy<=1.14.5,>=1.14.3', # required by tensorflow, mxnet, and us\n 'scipy'\n ],\n extras_require = {\n@@ -17,10 +17,18 @@\n 'uproot',\n ],\n 'torch': [\n- 'torch'\n+ 'torch>=0.4.0'\n ],\n 'mxnet':[\n- 'mxnet',\n+ 'mxnet>=1.0.0',\n+ 'requests<2.19.0,>=2.18.4',\n+ 'numpy<1.15.0,>=1.8.2',\n+ 'requests<2.19.0,>=2.18.4',\n+ ],\n+ 'tensorflow':[\n+ 'tensorflow==1.10.0',\n+ 'numpy<=1.14.5,>=1.13.3',\n+ 'setuptools<=39.1.0',\n ],\n 'develop': [\n 'pyflakes',\n@@ -28,13 +36,11 @@\n 'pytest-cov>=2.5.1',\n 'pytest-benchmark[histogram]',\n 'python-coveralls',\n+ 'coverage==4.0.3', # coveralls\n 'matplotlib',\n 'jupyter',\n 'uproot',\n 'papermill',\n- 'torch',\n- 'tensorflow',\n- 'mxnet>=1.0.0',\n 'graphviz',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n", "issue": "Updated setup.py for pytorch > 0.4 dependency\n# Description\r\n\r\nI had 0.3.1 for Torch and that caused issues with some of the doctesting as the distributions did not have `cdf` methods. I forced an upgrade pytorch and things are fine now.\n", "before_files": [{"content": "from setuptools import setup, find_packages\nsetup(\n name = 'pyhf',\n version = '0.0.8',\n description = '(partial) pure python histfactory implementation',\n url = '',\n author = 'Lukas Heinrich',\n author_email = '[email protected]',\n packages = find_packages(),\n include_package_data = True,\n install_requires = [\n 'numpy>=1.14.3',\n 'scipy'\n ],\n extras_require = {\n 'xmlimport': [\n 'uproot',\n ],\n 'torch': [\n 'torch'\n ],\n 'mxnet':[\n 'mxnet',\n ],\n 'develop': [\n 'pyflakes',\n 'pytest>=3.5.1',\n 'pytest-cov>=2.5.1',\n 'pytest-benchmark[histogram]',\n 'python-coveralls',\n 'matplotlib',\n 'jupyter',\n 'uproot',\n 'papermill',\n 'torch',\n 'tensorflow',\n 'mxnet>=1.0.0',\n 'graphviz',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'jsonpatch',\n 'jsonschema>=2.6.0'\n ]\n },\n entry_points = {\n },\n dependency_links = [\n ]\n)\n", "path": "setup.py"}]} | 995 | 415 |
gh_patches_debug_31962 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-1086 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
handle exceptions that occur inside of jinja
## Issue
### Issue description
Given SQL with invalid jinja like:
```
{{ config(
materialized = "table",
schema = 'custom_schema' -- render this in a custom schema
) }}
```
dbt will throw a very unhelpful error that looks like:
```
File "/usr/local/Cellar/dbt/0.10.2/libexec/lib/python3.7/site-packages/dbt/parser/base_sql.py", line 95, in parse_sql_nodes
macros=macros)
File "/usr/local/Cellar/dbt/0.10.2/libexec/lib/python3.7/site-packages/dbt/parser/base.py", line 105, in parse_node
capture_macros=True)
File "/usr/local/Cellar/dbt/0.10.2/libexec/lib/python3.7/site-packages/dbt/clients/jinja.py", line 198, in get_rendered
return render_template(template, ctx, node)
File "/usr/local/Cellar/dbt/0.10.2/libexec/lib/python3.7/site-packages/dbt/clients/jinja.py", line 185, in render_template
return template.render(ctx)
File "/usr/local/Cellar/dbt/0.10.2/libexec/lib/python3.7/site-packages/jinja2/asyncsupport.py", line 76, in render
return original_render(self, *args, **kwargs)
File "/usr/local/Cellar/dbt/0.10.2/libexec/lib/python3.7/site-packages/jinja2/environment.py", line 1008, in render
return self.environment.handle_exception(exc_info, True)
File "/usr/local/Cellar/dbt/0.10.2/libexec/lib/python3.7/site-packages/jinja2/environment.py", line 780, in handle_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/Cellar/dbt/0.10.2/libexec/lib/python3.7/site-packages/jinja2/_compat.py", line 37, in reraise
raise value.with_traceback(tb)
File "<template>", line 1, in top-level template code
TypeError: exceptions must derive from BaseException
```
It will likely be difficult to pinpoint the exact error, but dbt should ideally throw some sort of human-readable parsing error indicating the model where the syntax error occurred.
</issue>
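As a rough standalone sketch of the kind of error handling being asked for (plain jinja2 with a hypothetical `render_model` helper; dbt's real rendering path differs), the idea is to catch jinja's low-level exceptions around rendering and re-raise them with the offending model's name attached:

```python
import jinja2


def render_model(raw_sql, model_name, ctx):
    """Render one model's sql, turning low-level jinja errors into a
    message that names the offending model."""
    env = jinja2.Environment()
    try:
        return env.from_string(raw_sql).render(**ctx)
    except (jinja2.TemplateError, TypeError) as exc:
        raise RuntimeError(
            "Compilation error in model '{}': {}".format(model_name, exc)
        ) from exc


# The '--' SQL comment inside the {{ config(...) }} expression is what
# trips the template up in the report above.
bad_sql = "{{ config(materialized='table', schema='custom_schema' -- render this in a custom schema) }}"
try:
    render_model(bad_sql, "my_model", {"config": lambda **kwargs: ""})
except RuntimeError as err:
    print(err)  # names the model instead of surfacing a bare jinja traceback
```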
<code>
[start of dbt/clients/jinja.py]
1 import codecs
2 import linecache
3 import os
4
5 import jinja2
6 import jinja2._compat
7 import jinja2.ext
8 import jinja2.nodes
9 import jinja2.parser
10 import jinja2.sandbox
11
12 import dbt.compat
13 import dbt.exceptions
14
15 from dbt.node_types import NodeType
16 from dbt.utils import AttrDict
17
18 from dbt.logger import GLOBAL_LOGGER as logger # noqa
19
20
21 class MacroFuzzParser(jinja2.parser.Parser):
22 def parse_macro(self):
23 node = jinja2.nodes.Macro(lineno=next(self.stream).lineno)
24
25 # modified to fuzz macros defined in the same file. this way
26 # dbt can understand the stack of macros being called.
27 # - @cmcarthur
28 node.name = dbt.utils.get_dbt_macro_name(
29 self.parse_assign_target(name_only=True).name)
30
31 self.parse_signature(node)
32 node.body = self.parse_statements(('name:endmacro',),
33 drop_needle=True)
34 return node
35
36
37 class MacroFuzzEnvironment(jinja2.sandbox.SandboxedEnvironment):
38 def _parse(self, source, name, filename):
39 return MacroFuzzParser(
40 self, source, name,
41 jinja2._compat.encode_filename(filename)
42 ).parse()
43
44 def _compile(self, source, filename):
45 """Override jinja's compilation to stash the rendered source inside
46 the python linecache for debugging.
47 """
48 if filename == '<template>':
49 # make a better filename
50 filename = 'dbt-{}'.format(
51 codecs.encode(os.urandom(12), 'hex').decode('ascii')
52 )
53 # encode, though I don't think this matters
54 filename = jinja2._compat.encode_filename(filename)
55 # put ourselves in the cache using the 'lazycache' method
56 linecache.cache[filename] = (lambda: source,)
57
58 return super(MacroFuzzEnvironment, self)._compile(source, filename)
59
60
61 class TemplateCache(object):
62
63 def __init__(self):
64 self.file_cache = {}
65
66 def get_node_template(self, node):
67 key = (node['package_name'], node['original_file_path'])
68
69 if key in self.file_cache:
70 return self.file_cache[key]
71
72 template = get_template(
73 string=node.get('raw_sql'),
74 ctx={},
75 node=node
76 )
77 self.file_cache[key] = template
78
79 return template
80
81 def clear(self):
82 self.file_cache.clear()
83
84
85 template_cache = TemplateCache()
86
87
88 def macro_generator(node):
89 def apply_context(context):
90 def call(*args, **kwargs):
91 name = node.get('name')
92 template = template_cache.get_node_template(node)
93 module = template.make_module(context, False, context)
94
95 if node['resource_type'] == NodeType.Operation:
96 macro = module.__dict__[dbt.utils.get_dbt_operation_name(name)]
97 else:
98 macro = module.__dict__[dbt.utils.get_dbt_macro_name(name)]
99 module.__dict__.update(context)
100
101 try:
102 return macro(*args, **kwargs)
103 except dbt.exceptions.MacroReturn as e:
104 return e.value
105 except (TypeError, jinja2.exceptions.TemplateRuntimeError) as e:
106 dbt.exceptions.raise_compiler_error(str(e), node)
107 except dbt.exceptions.CompilationException as e:
108 e.stack.append(node)
109 raise e
110
111 return call
112 return apply_context
113
114
115 class MaterializationExtension(jinja2.ext.Extension):
116 tags = ['materialization']
117
118 def parse(self, parser):
119 node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)
120 materialization_name = \
121 parser.parse_assign_target(name_only=True).name
122
123 adapter_name = 'default'
124 node.args = []
125 node.defaults = []
126
127 while parser.stream.skip_if('comma'):
128 target = parser.parse_assign_target(name_only=True)
129
130 if target.name == 'default':
131 pass
132
133 elif target.name == 'adapter':
134 parser.stream.expect('assign')
135 value = parser.parse_expression()
136 adapter_name = value.value
137
138 else:
139 dbt.exceptions.invalid_materialization_argument(
140 materialization_name, target.name)
141
142 node.name = dbt.utils.get_materialization_macro_name(
143 materialization_name, adapter_name)
144
145 node.body = parser.parse_statements(('name:endmaterialization',),
146 drop_needle=True)
147
148 return node
149
150
151 class OperationExtension(jinja2.ext.Extension):
152 tags = ['operation']
153
154 def parse(self, parser):
155 node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)
156 operation_name = \
157 parser.parse_assign_target(name_only=True).name
158
159 node.args = []
160 node.defaults = []
161
162 while parser.stream.skip_if('comma'):
163 target = parser.parse_assign_target(name_only=True)
164
165 node.name = dbt.utils.get_operation_macro_name(operation_name)
166
167 node.body = parser.parse_statements(('name:endoperation',),
168 drop_needle=True)
169
170 return node
171
172
173 class DocumentationExtension(jinja2.ext.Extension):
174 tags = ['docs']
175
176 def parse(self, parser):
177 node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)
178 docs_name = parser.parse_assign_target(name_only=True).name
179
180 node.args = []
181 node.defaults = []
182 node.name = dbt.utils.get_docs_macro_name(docs_name)
183 node.body = parser.parse_statements(('name:enddocs',),
184 drop_needle=True)
185 return node
186
187
188 def _is_dunder_name(name):
189 return name.startswith('__') and name.endswith('__')
190
191
192 def create_macro_capture_env(node):
193
194 class ParserMacroCapture(jinja2.Undefined):
195 """
196 This class sets up the parser to capture macros.
197 """
198 def __init__(self, hint=None, obj=None, name=None,
199 exc=None):
200 super(jinja2.Undefined, self).__init__()
201 self.node = node
202 self.name = name
203 self.package_name = node.get('package_name')
204 # jinja uses these for safety, so we have to override them.
205 # see https://github.com/pallets/jinja/blob/master/jinja2/sandbox.py#L332-L339 # noqa
206 self.unsafe_callable = False
207 self.alters_data = False
208
209 def __deepcopy__(self, memo):
210 path = os.path.join(self.node.get('root_path'),
211 self.node.get('original_file_path'))
212
213 logger.debug(
214 'A ParserMacroCapture has been deecopy()d, invalid reference '
215 'to "{}" in node {}.{} (source path: {})'
216 .format(self.name, self.node.get('package_name'),
217 self.node.get('name'),
218 path))
219
220 dbt.exceptions.raise_compiler_error(
221 'dbt has detected at least one invalid reference in {}.{}. '
222 'Check logs for more information'
223 .format(self.node.get('package_name'), self.node.get('name'))
224 )
225
226 def __getattr__(self, name):
227 if name == 'name' or _is_dunder_name(name):
228 raise AttributeError(
229 "'{}' object has no attribute '{}'"
230 .format(type(self).__name__, name)
231 )
232
233 self.package_name = self.name
234 self.name = name
235
236 return self
237
238 def __call__(self, *args, **kwargs):
239 return True
240
241 return ParserMacroCapture
242
243
244 def get_environment(node=None, capture_macros=False):
245 args = {
246 'extensions': []
247 }
248
249 if capture_macros:
250 args['undefined'] = create_macro_capture_env(node)
251
252 args['extensions'].append(MaterializationExtension)
253 args['extensions'].append(OperationExtension)
254 args['extensions'].append(DocumentationExtension)
255
256 return MacroFuzzEnvironment(**args)
257
258
259 def parse(string):
260 try:
261 return get_environment().parse(dbt.compat.to_string(string))
262
263 except (jinja2.exceptions.TemplateSyntaxError,
264 jinja2.exceptions.UndefinedError) as e:
265 e.translated = False
266 dbt.exceptions.raise_compiler_error(str(e))
267
268
269 def get_template(string, ctx, node=None, capture_macros=False):
270 try:
271 env = get_environment(node, capture_macros)
272
273 template_source = dbt.compat.to_string(string)
274 return env.from_string(template_source, globals=ctx)
275
276 except (jinja2.exceptions.TemplateSyntaxError,
277 jinja2.exceptions.UndefinedError) as e:
278 e.translated = False
279 dbt.exceptions.raise_compiler_error(str(e), node)
280
281
282 def render_template(template, ctx, node=None):
283 try:
284 return template.render(ctx)
285
286 except (jinja2.exceptions.TemplateSyntaxError,
287 jinja2.exceptions.UndefinedError) as e:
288 e.translated = False
289 dbt.exceptions.raise_compiler_error(str(e), node)
290
291
292 def get_rendered(string, ctx, node=None,
293 capture_macros=False):
294 template = get_template(string, ctx, node,
295 capture_macros=capture_macros)
296
297 return render_template(template, ctx, node)
298
299
300 def undefined_error(msg):
301 raise jinja2.exceptions.UndefinedError(msg)
302
[end of dbt/clients/jinja.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dbt/clients/jinja.py b/dbt/clients/jinja.py
--- a/dbt/clients/jinja.py
+++ b/dbt/clients/jinja.py
@@ -195,9 +195,8 @@
"""
This class sets up the parser to capture macros.
"""
- def __init__(self, hint=None, obj=None, name=None,
- exc=None):
- super(jinja2.Undefined, self).__init__()
+ def __init__(self, hint=None, obj=None, name=None, exc=None):
+ super(ParserMacroCapture, self).__init__(hint=hint, name=name)
self.node = node
self.name = name
self.package_name = node.get('package_name')
@@ -211,18 +210,22 @@
self.node.get('original_file_path'))
logger.debug(
- 'A ParserMacroCapture has been deecopy()d, invalid reference '
- 'to "{}" in node {}.{} (source path: {})'
+ 'dbt encountered an undefined variable, "{}" in node {}.{} '
+ '(source path: {})'
.format(self.name, self.node.get('package_name'),
- self.node.get('name'),
- path))
+ self.node.get('name'), path))
+ # match jinja's message
dbt.exceptions.raise_compiler_error(
- 'dbt has detected at least one invalid reference in {}.{}. '
- 'Check logs for more information'
- .format(self.node.get('package_name'), self.node.get('name'))
+ "{!r} is undefined".format(self.name),
+ node=self.node
)
+ def __getitem__(self, name):
+ # Propagate the undefined value if a caller accesses this as if it
+ # were a dictionary
+ return self
+
def __getattr__(self, name):
if name == 'name' or _is_dunder_name(name):
raise AttributeError(
| {"golden_diff": "diff --git a/dbt/clients/jinja.py b/dbt/clients/jinja.py\n--- a/dbt/clients/jinja.py\n+++ b/dbt/clients/jinja.py\n@@ -195,9 +195,8 @@\n \"\"\"\n This class sets up the parser to capture macros.\n \"\"\"\n- def __init__(self, hint=None, obj=None, name=None,\n- exc=None):\n- super(jinja2.Undefined, self).__init__()\n+ def __init__(self, hint=None, obj=None, name=None, exc=None):\n+ super(ParserMacroCapture, self).__init__(hint=hint, name=name)\n self.node = node\n self.name = name\n self.package_name = node.get('package_name')\n@@ -211,18 +210,22 @@\n self.node.get('original_file_path'))\n \n logger.debug(\n- 'A ParserMacroCapture has been deecopy()d, invalid reference '\n- 'to \"{}\" in node {}.{} (source path: {})'\n+ 'dbt encountered an undefined variable, \"{}\" in node {}.{} '\n+ '(source path: {})'\n .format(self.name, self.node.get('package_name'),\n- self.node.get('name'),\n- path))\n+ self.node.get('name'), path))\n \n+ # match jinja's message\n dbt.exceptions.raise_compiler_error(\n- 'dbt has detected at least one invalid reference in {}.{}. '\n- 'Check logs for more information'\n- .format(self.node.get('package_name'), self.node.get('name'))\n+ \"{!r} is undefined\".format(self.name),\n+ node=self.node\n )\n \n+ def __getitem__(self, name):\n+ # Propagate the undefined value if a caller accesses this as if it\n+ # were a dictionary\n+ return self\n+\n def __getattr__(self, name):\n if name == 'name' or _is_dunder_name(name):\n raise AttributeError(\n", "issue": "handle exceptions that occur inside of jinja\n## Issue\r\n\r\n### Issue description\r\nGiven sql with invalid jinja like:\r\n```\r\n{{ config(\r\n materialized = \"table\",\r\n schema = 'custom_schema' -- render this in a custom schema\r\n) }}\r\n```\r\n\r\ndbt will throw a very unhelpful error that looks like:\r\n```\r\n File \"/usr/local/Cellar/dbt/0.10.2/libexec/lib/python3.7/site-packages/dbt/parser/base_sql.py\", line 95, in parse_sql_nodes\r\n macros=macros)\r\n File \"/usr/local/Cellar/dbt/0.10.2/libexec/lib/python3.7/site-packages/dbt/parser/base.py\", line 105, in parse_node\r\n capture_macros=True)\r\n File \"/usr/local/Cellar/dbt/0.10.2/libexec/lib/python3.7/site-packages/dbt/clients/jinja.py\", line 198, in get_rendered\r\n return render_template(template, ctx, node)\r\n File \"/usr/local/Cellar/dbt/0.10.2/libexec/lib/python3.7/site-packages/dbt/clients/jinja.py\", line 185, in render_template\r\n return template.render(ctx)\r\n File \"/usr/local/Cellar/dbt/0.10.2/libexec/lib/python3.7/site-packages/jinja2/asyncsupport.py\", line 76, in render\r\n return original_render(self, *args, **kwargs)\r\n File \"/usr/local/Cellar/dbt/0.10.2/libexec/lib/python3.7/site-packages/jinja2/environment.py\", line 1008, in render\r\n return self.environment.handle_exception(exc_info, True)\r\n File \"/usr/local/Cellar/dbt/0.10.2/libexec/lib/python3.7/site-packages/jinja2/environment.py\", line 780, in handle_exception\r\n reraise(exc_type, exc_value, tb)\r\n File \"/usr/local/Cellar/dbt/0.10.2/libexec/lib/python3.7/site-packages/jinja2/_compat.py\", line 37, in reraise\r\n raise value.with_traceback(tb)\r\n File \"<template>\", line 1, in top-level template code\r\nTypeError: exceptions must derive from BaseException\r\n```\r\n\r\nIt will likely be difficult to pinpoint the exact error, but it should ideally be able to throw some sort of human-readable parsing error, indicating the model where the syntax error occurred.\n", "before_files": [{"content": "import codecs\nimport linecache\nimport os\n\nimport 
jinja2\nimport jinja2._compat\nimport jinja2.ext\nimport jinja2.nodes\nimport jinja2.parser\nimport jinja2.sandbox\n\nimport dbt.compat\nimport dbt.exceptions\n\nfrom dbt.node_types import NodeType\nfrom dbt.utils import AttrDict\n\nfrom dbt.logger import GLOBAL_LOGGER as logger # noqa\n\n\nclass MacroFuzzParser(jinja2.parser.Parser):\n def parse_macro(self):\n node = jinja2.nodes.Macro(lineno=next(self.stream).lineno)\n\n # modified to fuzz macros defined in the same file. this way\n # dbt can understand the stack of macros being called.\n # - @cmcarthur\n node.name = dbt.utils.get_dbt_macro_name(\n self.parse_assign_target(name_only=True).name)\n\n self.parse_signature(node)\n node.body = self.parse_statements(('name:endmacro',),\n drop_needle=True)\n return node\n\n\nclass MacroFuzzEnvironment(jinja2.sandbox.SandboxedEnvironment):\n def _parse(self, source, name, filename):\n return MacroFuzzParser(\n self, source, name,\n jinja2._compat.encode_filename(filename)\n ).parse()\n\n def _compile(self, source, filename):\n \"\"\"Override jinja's compilation to stash the rendered source inside\n the python linecache for debugging.\n \"\"\"\n if filename == '<template>':\n # make a better filename\n filename = 'dbt-{}'.format(\n codecs.encode(os.urandom(12), 'hex').decode('ascii')\n )\n # encode, though I don't think this matters\n filename = jinja2._compat.encode_filename(filename)\n # put ourselves in the cache using the 'lazycache' method\n linecache.cache[filename] = (lambda: source,)\n\n return super(MacroFuzzEnvironment, self)._compile(source, filename)\n\n\nclass TemplateCache(object):\n\n def __init__(self):\n self.file_cache = {}\n\n def get_node_template(self, node):\n key = (node['package_name'], node['original_file_path'])\n\n if key in self.file_cache:\n return self.file_cache[key]\n\n template = get_template(\n string=node.get('raw_sql'),\n ctx={},\n node=node\n )\n self.file_cache[key] = template\n\n return template\n\n def clear(self):\n self.file_cache.clear()\n\n\ntemplate_cache = TemplateCache()\n\n\ndef macro_generator(node):\n def apply_context(context):\n def call(*args, **kwargs):\n name = node.get('name')\n template = template_cache.get_node_template(node)\n module = template.make_module(context, False, context)\n\n if node['resource_type'] == NodeType.Operation:\n macro = module.__dict__[dbt.utils.get_dbt_operation_name(name)]\n else:\n macro = module.__dict__[dbt.utils.get_dbt_macro_name(name)]\n module.__dict__.update(context)\n\n try:\n return macro(*args, **kwargs)\n except dbt.exceptions.MacroReturn as e:\n return e.value\n except (TypeError, jinja2.exceptions.TemplateRuntimeError) as e:\n dbt.exceptions.raise_compiler_error(str(e), node)\n except dbt.exceptions.CompilationException as e:\n e.stack.append(node)\n raise e\n\n return call\n return apply_context\n\n\nclass MaterializationExtension(jinja2.ext.Extension):\n tags = ['materialization']\n\n def parse(self, parser):\n node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)\n materialization_name = \\\n parser.parse_assign_target(name_only=True).name\n\n adapter_name = 'default'\n node.args = []\n node.defaults = []\n\n while parser.stream.skip_if('comma'):\n target = parser.parse_assign_target(name_only=True)\n\n if target.name == 'default':\n pass\n\n elif target.name == 'adapter':\n parser.stream.expect('assign')\n value = parser.parse_expression()\n adapter_name = value.value\n\n else:\n dbt.exceptions.invalid_materialization_argument(\n materialization_name, target.name)\n\n node.name = 
dbt.utils.get_materialization_macro_name(\n materialization_name, adapter_name)\n\n node.body = parser.parse_statements(('name:endmaterialization',),\n drop_needle=True)\n\n return node\n\n\nclass OperationExtension(jinja2.ext.Extension):\n tags = ['operation']\n\n def parse(self, parser):\n node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)\n operation_name = \\\n parser.parse_assign_target(name_only=True).name\n\n node.args = []\n node.defaults = []\n\n while parser.stream.skip_if('comma'):\n target = parser.parse_assign_target(name_only=True)\n\n node.name = dbt.utils.get_operation_macro_name(operation_name)\n\n node.body = parser.parse_statements(('name:endoperation',),\n drop_needle=True)\n\n return node\n\n\nclass DocumentationExtension(jinja2.ext.Extension):\n tags = ['docs']\n\n def parse(self, parser):\n node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)\n docs_name = parser.parse_assign_target(name_only=True).name\n\n node.args = []\n node.defaults = []\n node.name = dbt.utils.get_docs_macro_name(docs_name)\n node.body = parser.parse_statements(('name:enddocs',),\n drop_needle=True)\n return node\n\n\ndef _is_dunder_name(name):\n return name.startswith('__') and name.endswith('__')\n\n\ndef create_macro_capture_env(node):\n\n class ParserMacroCapture(jinja2.Undefined):\n \"\"\"\n This class sets up the parser to capture macros.\n \"\"\"\n def __init__(self, hint=None, obj=None, name=None,\n exc=None):\n super(jinja2.Undefined, self).__init__()\n self.node = node\n self.name = name\n self.package_name = node.get('package_name')\n # jinja uses these for safety, so we have to override them.\n # see https://github.com/pallets/jinja/blob/master/jinja2/sandbox.py#L332-L339 # noqa\n self.unsafe_callable = False\n self.alters_data = False\n\n def __deepcopy__(self, memo):\n path = os.path.join(self.node.get('root_path'),\n self.node.get('original_file_path'))\n\n logger.debug(\n 'A ParserMacroCapture has been deecopy()d, invalid reference '\n 'to \"{}\" in node {}.{} (source path: {})'\n .format(self.name, self.node.get('package_name'),\n self.node.get('name'),\n path))\n\n dbt.exceptions.raise_compiler_error(\n 'dbt has detected at least one invalid reference in {}.{}. 
'\n 'Check logs for more information'\n .format(self.node.get('package_name'), self.node.get('name'))\n )\n\n def __getattr__(self, name):\n if name == 'name' or _is_dunder_name(name):\n raise AttributeError(\n \"'{}' object has no attribute '{}'\"\n .format(type(self).__name__, name)\n )\n\n self.package_name = self.name\n self.name = name\n\n return self\n\n def __call__(self, *args, **kwargs):\n return True\n\n return ParserMacroCapture\n\n\ndef get_environment(node=None, capture_macros=False):\n args = {\n 'extensions': []\n }\n\n if capture_macros:\n args['undefined'] = create_macro_capture_env(node)\n\n args['extensions'].append(MaterializationExtension)\n args['extensions'].append(OperationExtension)\n args['extensions'].append(DocumentationExtension)\n\n return MacroFuzzEnvironment(**args)\n\n\ndef parse(string):\n try:\n return get_environment().parse(dbt.compat.to_string(string))\n\n except (jinja2.exceptions.TemplateSyntaxError,\n jinja2.exceptions.UndefinedError) as e:\n e.translated = False\n dbt.exceptions.raise_compiler_error(str(e))\n\n\ndef get_template(string, ctx, node=None, capture_macros=False):\n try:\n env = get_environment(node, capture_macros)\n\n template_source = dbt.compat.to_string(string)\n return env.from_string(template_source, globals=ctx)\n\n except (jinja2.exceptions.TemplateSyntaxError,\n jinja2.exceptions.UndefinedError) as e:\n e.translated = False\n dbt.exceptions.raise_compiler_error(str(e), node)\n\n\ndef render_template(template, ctx, node=None):\n try:\n return template.render(ctx)\n\n except (jinja2.exceptions.TemplateSyntaxError,\n jinja2.exceptions.UndefinedError) as e:\n e.translated = False\n dbt.exceptions.raise_compiler_error(str(e), node)\n\n\ndef get_rendered(string, ctx, node=None,\n capture_macros=False):\n template = get_template(string, ctx, node,\n capture_macros=capture_macros)\n\n return render_template(template, ctx, node)\n\n\ndef undefined_error(msg):\n raise jinja2.exceptions.UndefinedError(msg)\n", "path": "dbt/clients/jinja.py"}]} | 3,872 | 440 |
gh_patches_debug_24803 | rasdani/github-patches | git_diff | facebookresearch__hydra-135 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error when using strict mode and multirun
Modifying the `6_sweep` example to have `@hydra.main(config_path="conf/config.yaml", strict=True)` causes this error:
```python3
6_sweep(master*)$ python3 experiment.py -m
Traceback (most recent call last):
File "experiment.py", line 15, in <module>
experiment()
File "/private/home/bda/repos/hydra/hydra/main.py", line 62, in decorated_main
run_hydra(get_args(), task_function, config_path, strict)
File "/private/home/bda/repos/hydra/hydra/_internal/utils.py", line 57, in run_hydra
hydra.multirun(overrides=args.overrides)
File "/private/home/bda/repos/hydra/hydra/_internal/hydra.py", line 124, in multirun
return sweeper.sweep(arguments=task_overrides)
File "/private/home/bda/repos/hydra/hydra/plugins/step_sweeper.py", line 63, in sweep
results = self.launcher.launch(batch)
File "/private/home/bda/repos/hydra/plugins/submitit/hydra_plugins/submitit/submitit_launcher.py", line 67, in launch
self.config.hydra.job.num_jobs = num_jobs
File "/private/home/bda/anaconda3/lib/python3.7/site-packages/omegaconf-1.3.0-py3.7.egg/omegaconf/dictconfig.py", line 59, in __setattr__
self.__setitem__(key, value)
File "/private/home/bda/anaconda3/lib/python3.7/site-packages/omegaconf-1.3.0-py3.7.egg/omegaconf/dictconfig.py", line 28, in __setitem__
raise KeyError("Accessing unknown key in a struct : {}".format(self.get_full_key(key)))
KeyError: 'Accessing unknown key in a struct : hydra.job.num_jobs'
```
---
Perhaps hydra needs to add the right placeholders in the config here for the plugins to modify/write into, or the plugin needs to have a temporary config that's not locked?
</issue>
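For reference, the workaround the reporter gestures at, temporarily unlocking a struct ("strict") config so that a plugin can write new keys, is what OmegaConf's `open_dict` context manager provides. A minimal sketch follows; it assumes a reasonably recent OmegaConf and is not taken from the hydra codebase.
```python
# Minimal sketch: writing a new key into a struct config via open_dict.
from omegaconf import OmegaConf, open_dict

cfg = OmegaConf.create({"hydra": {"job": {}}})
OmegaConf.set_struct(cfg, True)  # struct mode is the "strict" behavior

try:
    cfg.hydra.job.num_jobs = 4   # raises: accessing unknown key in a struct
except Exception as exc:
    print("blocked:", exc)

with open_dict(cfg):
    cfg.hydra.job.num_jobs = 4   # allowed while the config is re-opened

print(cfg.hydra.job.num_jobs)    # 4
```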
<code>
[start of plugins/submitit/hydra_plugins/submitit/submitit_launcher.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import logging
3 import os
4
5 import hydra._internal.utils
6 import hydra.plugins.common.utils
7 from hydra.plugins import Launcher
8
9 from omegaconf import open_dict
10
11 # pylint: disable=C0103
12 log = logging.getLogger(__name__)
13
14
15 class SubmititLauncher(Launcher):
16 def __init__(self, queue, folder, queue_parameters, conda_file=None):
17 self.queue = queue
18 self.queue_parameters = queue_parameters
19 self.folder = folder
20 self.conda_file = conda_file
21 self.config = None
22 self.task_function = None
23 self.verbose = None
24 self.sweep_configs = None
25 self.config_loader = None
26
27 def setup(self, config, config_loader, task_function, verbose):
28 self.config = config
29 self.config_loader = config_loader
30 self.task_function = task_function
31 self.verbose = verbose
32
33 def launch_job(self, sweep_overrides, job_dir_key, job_num):
34 hydra.plugins.common.utils.configure_log(None, self.verbose)
35 hydra.plugins.common.utils.setup_globals()
36 sweep_config = self.config_loader.load_sweep_config(
37 self.config, sweep_overrides
38 )
39 with open_dict(sweep_config):
40 # Populate new job variables
41 if "SLURM_JOB_ID" in os.environ:
42 sweep_config.hydra.job.id = os.environ["SLURM_JOB_ID"]
43 elif "CHRONOS_JOB_ID" in os.environ:
44 sweep_config.hydra.job.id = os.environ["CHRONOS_JOB_ID"]
45 else:
46 sweep_config.hydra.job.id = "unknown"
47
48 sweep_config.hydra.job.num = job_num
49 sweep_config.hydra.job.override_dirname = hydra.plugins.common.utils.get_overrides_dirname(
50 sweep_config.hydra.overrides.task
51 )
52
53 return hydra.plugins.common.utils.run_job(
54 config=sweep_config,
55 task_function=self.task_function,
56 verbose=self.verbose,
57 job_dir_key=job_dir_key,
58 job_subdir_key="hydra.sweep.subdir",
59 )
60
61 def launch(self, job_overrides):
62 import submitit
63
64 num_jobs = len(job_overrides)
65 assert num_jobs > 0
66 self.config.hydra.job.num_jobs = num_jobs
67 if self.queue == "auto":
68 executor = submitit.AutoExecutor(
69 folder=self.folder, conda_file=self.conda_file
70 )
71 elif self.queue == "slurm":
72 executor = submitit.SlurmExecutor(folder=self.folder)
73 elif self.queue == "chronos":
74 executor = submitit.ChronosExecutor(
75 folder=self.folder, conda_file=self.conda_file
76 )
77 elif self.queue == "local":
78 executor = submitit.LocalExecutor(folder=self.folder)
79 else:
80 raise RuntimeError("Unsupported queue type {}".format(self.queue))
81
82 executor.update_parameters(**self.queue_parameters[self.queue])
83
84 log.info("Sweep output dir : {}".format(self.config.hydra.sweep.dir))
85 path_str = str(self.config.hydra.sweep.dir)
86 os.makedirs(path_str, exist_ok=True)
87 if self.config.hydra.sweep.mode is not None:
88 mode = int(str(self.config.hydra.sweep.mode), 8)
89 os.chmod(path_str, mode=mode)
90
91 jobs = []
92 for job_num in range(num_jobs):
93 sweep_override = list(job_overrides[job_num])
94 log.info(
95 "\t#{} : {}".format(
96 job_num,
97 " ".join(
98 hydra.plugins.common.utils.filter_overrides(sweep_override)
99 ),
100 )
101 )
102 job = executor.submit(
103 self.launch_job, sweep_override, "hydra.sweep.dir", job_num
104 )
105 jobs.append(job)
106
107 return [j.results() for j in jobs]
108
[end of plugins/submitit/hydra_plugins/submitit/submitit_launcher.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugins/submitit/hydra_plugins/submitit/submitit_launcher.py b/plugins/submitit/hydra_plugins/submitit/submitit_launcher.py
--- a/plugins/submitit/hydra_plugins/submitit/submitit_launcher.py
+++ b/plugins/submitit/hydra_plugins/submitit/submitit_launcher.py
@@ -60,11 +60,14 @@
)
def launch(self, job_overrides):
+ # lazy import to ensurue plugin discovery remains fast
import submitit
num_jobs = len(job_overrides)
assert num_jobs > 0
- self.config.hydra.job.num_jobs = num_jobs
+ with open_dict(self.config):
+ self.config.hydra.job.num_jobs = num_jobs
+
if self.queue == "auto":
executor = submitit.AutoExecutor(
folder=self.folder, conda_file=self.conda_file, par_file=self.par_file
@@ -85,7 +88,7 @@
log.info("Sweep output dir : {}".format(self.config.hydra.sweep.dir))
path_str = str(self.config.hydra.sweep.dir)
os.makedirs(path_str, exist_ok=True)
- if self.config.hydra.sweep.mode is not None:
+ if "mode" in self.config.hydra.sweep:
mode = int(str(self.config.hydra.sweep.mode), 8)
os.chmod(path_str, mode=mode)
| {"golden_diff": "diff --git a/plugins/submitit/hydra_plugins/submitit/submitit_launcher.py b/plugins/submitit/hydra_plugins/submitit/submitit_launcher.py\n--- a/plugins/submitit/hydra_plugins/submitit/submitit_launcher.py\n+++ b/plugins/submitit/hydra_plugins/submitit/submitit_launcher.py\n@@ -60,11 +60,14 @@\n )\n \n def launch(self, job_overrides):\n+ # lazy import to ensurue plugin discovery remains fast\n import submitit\n \n num_jobs = len(job_overrides)\n assert num_jobs > 0\n- self.config.hydra.job.num_jobs = num_jobs\n+ with open_dict(self.config):\n+ self.config.hydra.job.num_jobs = num_jobs\n+\n if self.queue == \"auto\":\n executor = submitit.AutoExecutor(\n folder=self.folder, conda_file=self.conda_file, par_file=self.par_file\n@@ -85,7 +88,7 @@\n log.info(\"Sweep output dir : {}\".format(self.config.hydra.sweep.dir))\n path_str = str(self.config.hydra.sweep.dir)\n os.makedirs(path_str, exist_ok=True)\n- if self.config.hydra.sweep.mode is not None:\n+ if \"mode\" in self.config.hydra.sweep:\n mode = int(str(self.config.hydra.sweep.mode), 8)\n os.chmod(path_str, mode=mode)\n", "issue": "Error when using strict mode and multirun\nModifying the `6_sweep` example to have `@hydra.main(config_path=\"conf/config.yaml\", strict=True)` causes this error:\r\n\r\n```python3\r\n6_sweep(master*)$ python3 experiment.py -m\r\nTraceback (most recent call last):\r\n File \"experiment.py\", line 15, in <module>\r\n experiment()\r\n File \"/private/home/bda/repos/hydra/hydra/main.py\", line 62, in decorated_main\r\n run_hydra(get_args(), task_function, config_path, strict)\r\n File \"/private/home/bda/repos/hydra/hydra/_internal/utils.py\", line 57, in run_hydra\r\n hydra.multirun(overrides=args.overrides)\r\n File \"/private/home/bda/repos/hydra/hydra/_internal/hydra.py\", line 124, in multirun\r\n return sweeper.sweep(arguments=task_overrides)\r\n File \"/private/home/bda/repos/hydra/hydra/plugins/step_sweeper.py\", line 63, in sweep\r\n results = self.launcher.launch(batch)\r\n File \"/private/home/bda/repos/hydra/plugins/submitit/hydra_plugins/submitit/submitit_launcher.py\", line 67, in launch\r\n self.config.hydra.job.num_jobs = num_jobs\r\n File \"/private/home/bda/anaconda3/lib/python3.7/site-packages/omegaconf-1.3.0-py3.7.egg/omegaconf/dictconfig.py\", line 59, in __setattr__\r\n self.__setitem__(key, value)\r\n File \"/private/home/bda/anaconda3/lib/python3.7/site-packages/omegaconf-1.3.0-py3.7.egg/omegaconf/dictconfig.py\", line 28, in __setitem__\r\n raise KeyError(\"Accessing unknown key in a struct : {}\".format(self.get_full_key(key)))\r\nKeyError: 'Accessing unknown key in a struct : hydra.job.num_jobs'\r\n```\r\n\r\n---\r\n\r\nPerhaps hydra needs to add the right placeholders in the config here for the plugins to modify/write into, or the plugin needs to have a temporarily config that's not locked?\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport logging\nimport os\n\nimport hydra._internal.utils\nimport hydra.plugins.common.utils\nfrom hydra.plugins import Launcher\n\nfrom omegaconf import open_dict\n\n# pylint: disable=C0103\nlog = logging.getLogger(__name__)\n\n\nclass SubmititLauncher(Launcher):\n def __init__(self, queue, folder, queue_parameters, conda_file=None):\n self.queue = queue\n self.queue_parameters = queue_parameters\n self.folder = folder\n self.conda_file = conda_file\n self.config = None\n self.task_function = None\n self.verbose = None\n self.sweep_configs = None\n self.config_loader = None\n\n def setup(self, config, config_loader, task_function, verbose):\n self.config = config\n self.config_loader = config_loader\n self.task_function = task_function\n self.verbose = verbose\n\n def launch_job(self, sweep_overrides, job_dir_key, job_num):\n hydra.plugins.common.utils.configure_log(None, self.verbose)\n hydra.plugins.common.utils.setup_globals()\n sweep_config = self.config_loader.load_sweep_config(\n self.config, sweep_overrides\n )\n with open_dict(sweep_config):\n # Populate new job variables\n if \"SLURM_JOB_ID\" in os.environ:\n sweep_config.hydra.job.id = os.environ[\"SLURM_JOB_ID\"]\n elif \"CHRONOS_JOB_ID\" in os.environ:\n sweep_config.hydra.job.id = os.environ[\"CHRONOS_JOB_ID\"]\n else:\n sweep_config.hydra.job.id = \"unknown\"\n\n sweep_config.hydra.job.num = job_num\n sweep_config.hydra.job.override_dirname = hydra.plugins.common.utils.get_overrides_dirname(\n sweep_config.hydra.overrides.task\n )\n\n return hydra.plugins.common.utils.run_job(\n config=sweep_config,\n task_function=self.task_function,\n verbose=self.verbose,\n job_dir_key=job_dir_key,\n job_subdir_key=\"hydra.sweep.subdir\",\n )\n\n def launch(self, job_overrides):\n import submitit\n\n num_jobs = len(job_overrides)\n assert num_jobs > 0\n self.config.hydra.job.num_jobs = num_jobs\n if self.queue == \"auto\":\n executor = submitit.AutoExecutor(\n folder=self.folder, conda_file=self.conda_file\n )\n elif self.queue == \"slurm\":\n executor = submitit.SlurmExecutor(folder=self.folder)\n elif self.queue == \"chronos\":\n executor = submitit.ChronosExecutor(\n folder=self.folder, conda_file=self.conda_file\n )\n elif self.queue == \"local\":\n executor = submitit.LocalExecutor(folder=self.folder)\n else:\n raise RuntimeError(\"Unsupported queue type {}\".format(self.queue))\n\n executor.update_parameters(**self.queue_parameters[self.queue])\n\n log.info(\"Sweep output dir : {}\".format(self.config.hydra.sweep.dir))\n path_str = str(self.config.hydra.sweep.dir)\n os.makedirs(path_str, exist_ok=True)\n if self.config.hydra.sweep.mode is not None:\n mode = int(str(self.config.hydra.sweep.mode), 8)\n os.chmod(path_str, mode=mode)\n\n jobs = []\n for job_num in range(num_jobs):\n sweep_override = list(job_overrides[job_num])\n log.info(\n \"\\t#{} : {}\".format(\n job_num,\n \" \".join(\n hydra.plugins.common.utils.filter_overrides(sweep_override)\n ),\n )\n )\n job = executor.submit(\n self.launch_job, sweep_override, \"hydra.sweep.dir\", job_num\n )\n jobs.append(job)\n\n return [j.results() for j in jobs]\n", "path": "plugins/submitit/hydra_plugins/submitit/submitit_launcher.py"}]} | 2,088 | 317 |
gh_patches_debug_40617 | rasdani/github-patches | git_diff | rasterio__rasterio-290 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Read default of masked=None breaks for some nodatavals
Back in #71, there was some controversy over the decision to have RasterReader.read's default operation as `masked=None`, which is both the slowest and most complicated behavior, and also is _different_ from `masked=False`.
Here's one case that took a colleague and me hours to debug: it's allowed to have an image whose NoDataValue is outside the range of the dtype. If I want to read one of these, the default `read` behavior throws an error.
```
import numpy as np
data = np.array([[0, 0, 1], [1, 2, 0]])
kwargs = {
'driver': u'GTiff',
'dtype': 'uint8',
'nodata': -1.7e+308,
'height': 3,
'width': 3,
'count': 1
}
with rasterio.drivers():
with rasterio.open('foo.tif', 'w', **kwargs) as dst:
dst.write_band(1, data.astype(rasterio.uint8))
with rasterio.open('foo.tif') as src:
mask = src.read_band(1)
# TypeError: Fill value -1.7e+308 overflows dtype uint8
```
Obviously it's impossible for any raster values to have been flagged as NoDataValues, so the workaround is to ignore the NoDataValue entirely by setting `masked=False` (the distinction was a bit lost at first).
We probably don't want to change the default now, but here's what we can do:
- at the very least, print a warning suggesting that you may want `masked=False`
- I prefer tweaking behavior so that `masked=None` returns a masked array if at least one of the bands has a NoDataValue that is within the `dtype` bounds. (currently it only checks `nodataval is not None`)
Finally we should clarify the docstring: "masked if any of the nodatavals are not `None`", but "nodatavals" is not defined or referenced elsewhere in the documentation.
</issue>
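The "within the dtype bounds" check proposed above is easy to express with `np.iinfo` and `np.finfo`. The helper below is illustrative only; the function name is assumed and it is not part of the rasterio API.
```python
# Illustrative helper: is this nodata value representable in the dtype?
import numpy as np


def nodata_in_dtype_range(nodataval, dtype):
    kind = np.dtype(dtype).kind
    if kind in ("i", "u"):
        info = np.iinfo(dtype)
    elif kind == "f":
        info = np.finfo(dtype)
    else:
        return False
    return info.min <= nodataval <= info.max


print(nodata_in_dtype_range(-1.7e308, "uint8"))  # False -> skip masking
print(nodata_in_dtype_range(0, "uint8"))         # True  -> mask as usual
```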
<code>
[start of rasterio/rio/merge.py]
1 # Merge command.
2
3 import logging
4 import math
5 import os.path
6 import sys
7 import warnings
8
9 import click
10 from cligj import files_inout_arg, format_opt
11
12 import rasterio
13 from rasterio.rio.cli import cli
14 from rasterio.transform import Affine
15
16
17 @cli.command(short_help="Merge a stack of raster datasets.")
18 @files_inout_arg
19 @format_opt
20 @click.option('--bounds', nargs=4, type=float, default=None,
21 help="Output bounds: left, bottom, right, top.")
22 @click.option('--res', nargs=2, type=float, default=None,
23 help="Output dataset resolution: pixel width, pixel height")
24 @click.option('--nodata', '-n', type=float, default=None,
25 help="Override nodata values defined in input datasets")
26 @click.pass_context
27 def merge(ctx, files, driver, bounds, res, nodata):
28 """Copy valid pixels from input files to an output file.
29
30 All files must have the same number of bands, data type, and
31 coordinate reference system.
32
33 Input files are merged in their listed order using the reverse
34 painter's algorithm. If the output file exists, its values will be
35 overwritten by input values.
36
37 Geospatial bounds and resolution of a new output file in the
38 units of the input file coordinate reference system may be provided
39 and are otherwise taken from the first input file.
40 """
41 import numpy as np
42
43 verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1
44 logger = logging.getLogger('rio')
45
46 try:
47 with rasterio.drivers(CPL_DEBUG=verbosity>2):
48 output = files[-1]
49 files = files[:-1]
50
51 with rasterio.open(files[0]) as first:
52 first_res = first.res
53 kwargs = first.meta
54 kwargs.pop('affine')
55 nodataval = first.nodatavals[0]
56 dtype = first.dtypes[0]
57
58 if os.path.exists(output):
59 # TODO: prompt user to update existing file (-i option) like:
60 # overwrite b.tif? (y/n [n]) n
61 # not overwritten
62 dst = rasterio.open(output, 'r+')
63 nodataval = dst.nodatavals[0]
64 dtype = dst.dtypes[0]
65 dest = np.zeros((dst.count,) + dst.shape, dtype=dtype)
66 else:
67 # Create new output file.
68 # Extent from option or extent of all inputs.
69 if not bounds:
70 # scan input files.
71 xs = []
72 ys = []
73 for f in files:
74 with rasterio.open(f) as src:
75 left, bottom, right, top = src.bounds
76 xs.extend([left, right])
77 ys.extend([bottom, top])
78 bounds = min(xs), min(ys), max(xs), max(ys)
79 output_transform = Affine.translation(bounds[0], bounds[3])
80
81 # Resolution/pixel size.
82 if not res:
83 res = first_res
84 output_transform *= Affine.scale(res[0], -res[1])
85
86 # Dataset shape.
87 output_width = int(math.ceil((bounds[2]-bounds[0])/res[0]))
88 output_height = int(math.ceil((bounds[3]-bounds[1])/res[1]))
89
90 kwargs['driver'] == driver
91 kwargs['transform'] = output_transform
92 kwargs['width'] = output_width
93 kwargs['height'] = output_height
94
95 dst = rasterio.open(output, 'w', **kwargs)
96 dest = np.zeros((first.count, output_height, output_width),
97 dtype=dtype)
98
99 if nodata is not None:
100 nodataval = nodata
101
102 if nodataval is not None:
103 # Only fill if the nodataval is within dtype's range.
104 inrange = False
105 if np.dtype(dtype).kind in ('i', 'u'):
106 info = np.iinfo(dtype)
107 inrange = (info.min <= nodataval <= info.max)
108 elif np.dtype(dtype).kind == 'f':
109 info = np.finfo(dtype)
110 inrange = (info.min <= nodataval <= info.max)
111 if inrange:
112 dest.fill(nodataval)
113 else:
114 warnings.warn(
115 "Input file's nodata value, %s, is beyond the valid "
116 "range of its data type, %s. Consider overriding it "
117 "using the --nodata option for better results." % (
118 nodataval, dtype))
119 else:
120 nodataval = 0
121
122 for fname in reversed(files):
123 with rasterio.open(fname) as src:
124 # Real World (tm) use of boundless reads.
125 # This approach uses the maximum amount of memory to solve
126 # the problem. Making it more efficient is a TODO.
127 window = src.window(*dst.bounds)
128 data = np.zeros_like(dest)
129 data = src.read(
130 out=data,
131 window=window,
132 boundless=True,
133 masked=True)
134 np.copyto(dest, data,
135 where=np.logical_and(
136 dest==nodataval, data.mask==False))
137
138 if dst.mode == 'r+':
139 data = dst.read(masked=True)
140 np.copyto(dest, data,
141 where=np.logical_and(
142 dest==nodataval, data.mask==False))
143
144 dst.write(dest)
145 dst.close()
146
147 sys.exit(0)
148 except Exception:
149 logger.exception("Failed. Exception caught")
150 sys.exit(1)
151
[end of rasterio/rio/merge.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rasterio/rio/merge.py b/rasterio/rio/merge.py
--- a/rasterio/rio/merge.py
+++ b/rasterio/rio/merge.py
@@ -92,10 +92,16 @@
kwargs['width'] = output_width
kwargs['height'] = output_height
+ logger.debug("Kwargs: %r", kwargs)
+ logger.debug("bounds: %r", bounds)
+ logger.debug("Res: %r", res)
+
dst = rasterio.open(output, 'w', **kwargs)
dest = np.zeros((first.count, output_height, output_width),
dtype=dtype)
+ logger.debug("In merge, dest shape: %r", dest.shape)
+
if nodata is not None:
nodataval = nodata
@@ -119,27 +125,54 @@
else:
nodataval = 0
+ dst_w, dst_s, dst_e, dst_n = dst.bounds
+
for fname in reversed(files):
with rasterio.open(fname) as src:
# Real World (tm) use of boundless reads.
# This approach uses the maximum amount of memory to solve
# the problem. Making it more efficient is a TODO.
- window = src.window(*dst.bounds)
- data = np.zeros_like(dest)
- data = src.read(
- out=data,
- window=window,
- boundless=True,
+
+ # 1. Compute spatial intersection of destination
+ # and source.
+ src_w, src_s, src_e, src_n = src.bounds
+
+ int_w = src_w if src_w > dst_w else dst_w
+ int_s = src_s if src_s > dst_s else dst_s
+ int_e = src_e if src_e < dst_e else dst_e
+ int_n = src_n if src_n < dst_n else dst_n
+
+ # 2. Compute the source window.
+ src_window = src.window(int_w, int_s, int_e, int_n)
+
+ # 3. Compute the destination window.
+ dst_window = dst.window(int_w, int_s, int_e, int_n)
+
+ # 4. Initialize temp array.
+ temp = np.zeros(
+ (first.count,) + tuple(b - a for a, b in dst_window),
+ dtype=dtype)
+
+ temp = src.read(
+ out=temp,
+ window=src_window,
+ boundless=False,
masked=True)
- np.copyto(dest, data,
+
+ # 5. Copy elements of temp into dest.
+ roff, coff = dst.index(int_w, int_n)
+ h, w = temp.shape[-2:]
+
+ region = dest[:,roff:roff+h,coff:coff+w]
+ np.copyto(region, temp,
where=np.logical_and(
- dest==nodataval, data.mask==False))
+ region==nodataval, temp.mask==False))
if dst.mode == 'r+':
- data = dst.read(masked=True)
- np.copyto(dest, data,
+ temp = dst.read(masked=True)
+ np.copyto(dest, temp,
where=np.logical_and(
- dest==nodataval, data.mask==False))
+ dest==nodataval, temp.mask==False))
dst.write(dest)
dst.close()
| {"golden_diff": "diff --git a/rasterio/rio/merge.py b/rasterio/rio/merge.py\n--- a/rasterio/rio/merge.py\n+++ b/rasterio/rio/merge.py\n@@ -92,10 +92,16 @@\n kwargs['width'] = output_width\n kwargs['height'] = output_height\n \n+ logger.debug(\"Kwargs: %r\", kwargs)\n+ logger.debug(\"bounds: %r\", bounds)\n+ logger.debug(\"Res: %r\", res)\n+\n dst = rasterio.open(output, 'w', **kwargs)\n dest = np.zeros((first.count, output_height, output_width),\n dtype=dtype)\n \n+ logger.debug(\"In merge, dest shape: %r\", dest.shape)\n+\n if nodata is not None:\n nodataval = nodata\n \n@@ -119,27 +125,54 @@\n else:\n nodataval = 0\n \n+ dst_w, dst_s, dst_e, dst_n = dst.bounds\n+\n for fname in reversed(files):\n with rasterio.open(fname) as src:\n # Real World (tm) use of boundless reads.\n # This approach uses the maximum amount of memory to solve\n # the problem. Making it more efficient is a TODO.\n- window = src.window(*dst.bounds)\n- data = np.zeros_like(dest)\n- data = src.read(\n- out=data,\n- window=window,\n- boundless=True,\n+\n+ # 1. Compute spatial intersection of destination\n+ # and source.\n+ src_w, src_s, src_e, src_n = src.bounds\n+\n+ int_w = src_w if src_w > dst_w else dst_w\n+ int_s = src_s if src_s > dst_s else dst_s\n+ int_e = src_e if src_e < dst_e else dst_e\n+ int_n = src_n if src_n < dst_n else dst_n\n+\n+ # 2. Compute the source window.\n+ src_window = src.window(int_w, int_s, int_e, int_n)\n+\n+ # 3. Compute the destination window.\n+ dst_window = dst.window(int_w, int_s, int_e, int_n)\n+\n+ # 4. Initialize temp array.\n+ temp = np.zeros(\n+ (first.count,) + tuple(b - a for a, b in dst_window),\n+ dtype=dtype)\n+\n+ temp = src.read(\n+ out=temp,\n+ window=src_window,\n+ boundless=False,\n masked=True)\n- np.copyto(dest, data,\n+\n+ # 5. Copy elements of temp into dest.\n+ roff, coff = dst.index(int_w, int_n)\n+ h, w = temp.shape[-2:]\n+\n+ region = dest[:,roff:roff+h,coff:coff+w]\n+ np.copyto(region, temp,\n where=np.logical_and(\n- dest==nodataval, data.mask==False))\n+ region==nodataval, temp.mask==False))\n \n if dst.mode == 'r+':\n- data = dst.read(masked=True)\n- np.copyto(dest, data,\n+ temp = dst.read(masked=True)\n+ np.copyto(dest, temp,\n where=np.logical_and(\n- dest==nodataval, data.mask==False))\n+ dest==nodataval, temp.mask==False))\n \n dst.write(dest)\n dst.close()\n", "issue": "Read default of masked=None breaks for some nodatavals\nBack in #71, there was some controversy over the decision to have RasterReader.read's default operation as `masked=None`, which is both the slowest and most complicated behavior, and also is _different_ from `masked=False`.\n\nHere's one case that took a colleage and I hours to debug: It's allowed have an image whose NoDataValue is outside the range of the dtype. 
If I want to read one of these, the default `read` behavior throws an error.\n\n```\nimport numpy as np\ndata = np.array([[0, 0, 1], [1, 2, 0]])\nkwargs = {\n 'driver': u'GTiff',\n 'dtype': 'uint8',\n 'nodata': -1.7e+308,\n 'height': 3,\n 'width': 3,\n 'count': 1\n}\nwith rasterio.drivers():\n with rasterio.open('foo.tif', 'w', **kwargs) as dst:\n dst.write_band(1, data.astype(rasterio.uint8))\n\n with rasterio.open('foo.tif') as src:\n mask = src.read_band(1)\n# TypeError: Fill value -1.7e+308 overflows dtype uint8\n```\n\nObviously it's impossible for any raster values to have been flagged as NoDataValues, so the workaround is to ignore the NoDataValue entirely by setting `masked=False` (the distinction was a bit lost at first).\n\nWe probably don't want to change the default now, but here's what we can do:\n- at the very least, print a warning suggesting that you may want `masked=False`\n- I prefer tweaking behavior so that `masked=None` returns a masked array if at least one of the bands has a NoDataValue that is within the `dtype` bounds. (currently it only checks `nodataval is not None`)\n\nFinally we should clarify the docstring: \"masked if any of the nodatavals are not `None`\", but \"nodatavals\" is not defined or referenced elsewhere in the documentation.\n\n", "before_files": [{"content": "# Merge command.\n\nimport logging\nimport math\nimport os.path\nimport sys\nimport warnings\n\nimport click\nfrom cligj import files_inout_arg, format_opt\n\nimport rasterio\nfrom rasterio.rio.cli import cli\nfrom rasterio.transform import Affine\n\n\[email protected](short_help=\"Merge a stack of raster datasets.\")\n@files_inout_arg\n@format_opt\[email protected]('--bounds', nargs=4, type=float, default=None,\n help=\"Output bounds: left, bottom, right, top.\")\[email protected]('--res', nargs=2, type=float, default=None,\n help=\"Output dataset resolution: pixel width, pixel height\")\[email protected]('--nodata', '-n', type=float, default=None,\n help=\"Override nodata values defined in input datasets\")\[email protected]_context\ndef merge(ctx, files, driver, bounds, res, nodata):\n \"\"\"Copy valid pixels from input files to an output file.\n\n All files must have the same number of bands, data type, and\n coordinate reference system.\n\n Input files are merged in their listed order using the reverse\n painter's algorithm. If the output file exists, its values will be\n overwritten by input values.\n\n Geospatial bounds and resolution of a new output file in the\n units of the input file coordinate reference system may be provided\n and are otherwise taken from the first input file.\n \"\"\"\n import numpy as np\n\n verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1\n logger = logging.getLogger('rio')\n\n try:\n with rasterio.drivers(CPL_DEBUG=verbosity>2):\n output = files[-1]\n files = files[:-1]\n\n with rasterio.open(files[0]) as first:\n first_res = first.res\n kwargs = first.meta\n kwargs.pop('affine')\n nodataval = first.nodatavals[0]\n dtype = first.dtypes[0]\n\n if os.path.exists(output):\n # TODO: prompt user to update existing file (-i option) like:\n # overwrite b.tif? 
(y/n [n]) n\n # not overwritten\n dst = rasterio.open(output, 'r+')\n nodataval = dst.nodatavals[0]\n dtype = dst.dtypes[0]\n dest = np.zeros((dst.count,) + dst.shape, dtype=dtype)\n else:\n # Create new output file.\n # Extent from option or extent of all inputs.\n if not bounds:\n # scan input files.\n xs = []\n ys = []\n for f in files:\n with rasterio.open(f) as src:\n left, bottom, right, top = src.bounds\n xs.extend([left, right])\n ys.extend([bottom, top])\n bounds = min(xs), min(ys), max(xs), max(ys)\n output_transform = Affine.translation(bounds[0], bounds[3])\n\n # Resolution/pixel size.\n if not res:\n res = first_res\n output_transform *= Affine.scale(res[0], -res[1])\n\n # Dataset shape.\n output_width = int(math.ceil((bounds[2]-bounds[0])/res[0]))\n output_height = int(math.ceil((bounds[3]-bounds[1])/res[1]))\n\n kwargs['driver'] == driver\n kwargs['transform'] = output_transform\n kwargs['width'] = output_width\n kwargs['height'] = output_height\n\n dst = rasterio.open(output, 'w', **kwargs)\n dest = np.zeros((first.count, output_height, output_width),\n dtype=dtype)\n\n if nodata is not None:\n nodataval = nodata\n\n if nodataval is not None:\n # Only fill if the nodataval is within dtype's range.\n inrange = False\n if np.dtype(dtype).kind in ('i', 'u'):\n info = np.iinfo(dtype)\n inrange = (info.min <= nodataval <= info.max)\n elif np.dtype(dtype).kind == 'f':\n info = np.finfo(dtype)\n inrange = (info.min <= nodataval <= info.max)\n if inrange:\n dest.fill(nodataval)\n else:\n warnings.warn(\n \"Input file's nodata value, %s, is beyond the valid \"\n \"range of its data type, %s. Consider overriding it \"\n \"using the --nodata option for better results.\" % (\n nodataval, dtype))\n else:\n nodataval = 0\n\n for fname in reversed(files):\n with rasterio.open(fname) as src:\n # Real World (tm) use of boundless reads.\n # This approach uses the maximum amount of memory to solve\n # the problem. Making it more efficient is a TODO.\n window = src.window(*dst.bounds)\n data = np.zeros_like(dest)\n data = src.read(\n out=data,\n window=window,\n boundless=True,\n masked=True)\n np.copyto(dest, data,\n where=np.logical_and(\n dest==nodataval, data.mask==False))\n\n if dst.mode == 'r+':\n data = dst.read(masked=True)\n np.copyto(dest, data,\n where=np.logical_and(\n dest==nodataval, data.mask==False))\n\n dst.write(dest)\n dst.close()\n\n sys.exit(0)\n except Exception:\n logger.exception(\"Failed. Exception caught\")\n sys.exit(1)\n", "path": "rasterio/rio/merge.py"}]} | 2,537 | 769 |
gh_patches_debug_29392 | rasdani/github-patches | git_diff | goauthentik__authentik-7588 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
User write stages do not trigger a Model Updated event when writing to an existing user
**Describe the bug**
User write stages do not trigger a "Model updated" event for an existing user if the stage comes after an email stage with "activate pending user on success".
**To Reproduce**
Steps to reproduce the behavior:
1. Create a new enrollment flow
2. Add a prompt stage to enter username email and name
3. Do a user write, make sure it always creates a new user and store the user as inactive
4. Add an email stage, and enable "Activate pending user on success"
5. Add a prompt stage to set the password
6. Do another user write, make sure it never creates a user
7. Do a log in
During this process there is no Model Updated event sent in the second user write.
However, when you remove the email stage, it will trigger this event.
The correct event is also triggered by the user write in a user-settings flow, for example, so the problem seems to be related to the activation stage.
**Expected behavior**
A user write should trigger a Model Updated event when modifying an existing user, just as it triggers a Model Created event when a new user is written.
**Version and Deployment (please complete the following information):**
- authentik version: 2023.1.2
</issue>
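For orientation before the middleware source below: the audit events in question come from per-request Django signal handlers that are connected with a request-scoped `dispatch_uid` and disconnected afterwards, so a model save only produces an event if a handler is connected for that request. The sketch below is a stripped-down illustration of that pattern, not authentik code; the handler and `request_id` names are made up.
```python
# Stripped-down sketch of per-request audit wiring via Django signals.
from django.db.models.signals import post_save


def audit_post_save(sender, instance, created, **kwargs):
    action = "model_created" if created else "model_updated"
    print(f"{action}: {instance!r}")


request_id = "example-request-id"  # stand-in for the real per-request id
post_save.connect(audit_post_save, dispatch_uid=request_id, weak=False)
# ... any Model.save() during this request would now emit an event ...
post_save.disconnect(dispatch_uid=request_id)
```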
<code>
[start of authentik/events/middleware.py]
1 """Events middleware"""
2 from functools import partial
3 from threading import Thread
4 from typing import Any, Callable, Optional
5
6 from django.conf import settings
7 from django.contrib.sessions.models import Session
8 from django.core.exceptions import SuspiciousOperation
9 from django.db.models import Model
10 from django.db.models.signals import m2m_changed, post_save, pre_delete
11 from django.http import HttpRequest, HttpResponse
12 from guardian.models import UserObjectPermission
13
14 from authentik.core.models import (
15 AuthenticatedSession,
16 Group,
17 PropertyMapping,
18 Provider,
19 Source,
20 User,
21 UserSourceConnection,
22 )
23 from authentik.events.models import Event, EventAction, Notification
24 from authentik.events.utils import model_to_dict
25 from authentik.flows.models import FlowToken, Stage
26 from authentik.lib.sentry import before_send
27 from authentik.lib.utils.errors import exception_to_string
28 from authentik.outposts.models import OutpostServiceConnection
29 from authentik.policies.models import Policy, PolicyBindingModel
30 from authentik.providers.oauth2.models import AccessToken, AuthorizationCode, RefreshToken
31 from authentik.providers.scim.models import SCIMGroup, SCIMUser
32 from authentik.stages.authenticator_static.models import StaticToken
33
34 IGNORED_MODELS = (
35 Event,
36 Notification,
37 UserObjectPermission,
38 AuthenticatedSession,
39 StaticToken,
40 Session,
41 FlowToken,
42 Provider,
43 Source,
44 PropertyMapping,
45 UserSourceConnection,
46 Stage,
47 OutpostServiceConnection,
48 Policy,
49 PolicyBindingModel,
50 AuthorizationCode,
51 AccessToken,
52 RefreshToken,
53 SCIMUser,
54 SCIMGroup,
55 )
56
57
58 def should_log_model(model: Model) -> bool:
59 """Return true if operation on `model` should be logged"""
60 if model.__module__.startswith("silk"):
61 return False
62 return model.__class__ not in IGNORED_MODELS
63
64
65 def should_log_m2m(model: Model) -> bool:
66 """Return true if m2m operation should be logged"""
67 if model.__class__ in [User, Group]:
68 return True
69 return False
70
71
72 class EventNewThread(Thread):
73 """Create Event in background thread"""
74
75 action: str
76 request: HttpRequest
77 kwargs: dict[str, Any]
78 user: Optional[User] = None
79
80 def __init__(self, action: str, request: HttpRequest, user: Optional[User] = None, **kwargs):
81 super().__init__()
82 self.action = action
83 self.request = request
84 self.user = user
85 self.kwargs = kwargs
86
87 def run(self):
88 Event.new(self.action, **self.kwargs).from_http(self.request, user=self.user)
89
90
91 class AuditMiddleware:
92 """Register handlers for duration of request-response that log creation/update/deletion
93 of models"""
94
95 get_response: Callable[[HttpRequest], HttpResponse]
96
97 def __init__(self, get_response: Callable[[HttpRequest], HttpResponse]):
98 self.get_response = get_response
99
100 def connect(self, request: HttpRequest):
101 """Connect signal for automatic logging"""
102 if not hasattr(request, "user"):
103 return
104 if not getattr(request.user, "is_authenticated", False):
105 return
106 if not hasattr(request, "request_id"):
107 return
108 post_save_handler = partial(self.post_save_handler, user=request.user, request=request)
109 pre_delete_handler = partial(self.pre_delete_handler, user=request.user, request=request)
110 m2m_changed_handler = partial(self.m2m_changed_handler, user=request.user, request=request)
111 post_save.connect(
112 post_save_handler,
113 dispatch_uid=request.request_id,
114 weak=False,
115 )
116 pre_delete.connect(
117 pre_delete_handler,
118 dispatch_uid=request.request_id,
119 weak=False,
120 )
121 m2m_changed.connect(
122 m2m_changed_handler,
123 dispatch_uid=request.request_id,
124 weak=False,
125 )
126
127 def disconnect(self, request: HttpRequest):
128 """Disconnect signals"""
129 if not hasattr(request, "request_id"):
130 return
131 post_save.disconnect(dispatch_uid=request.request_id)
132 pre_delete.disconnect(dispatch_uid=request.request_id)
133 m2m_changed.disconnect(dispatch_uid=request.request_id)
134
135 def __call__(self, request: HttpRequest) -> HttpResponse:
136 self.connect(request)
137
138 response = self.get_response(request)
139
140 self.disconnect(request)
141 return response
142
143 def process_exception(self, request: HttpRequest, exception: Exception):
144 """Disconnect handlers in case of exception"""
145 self.disconnect(request)
146
147 if settings.DEBUG:
148 return
149 # Special case for SuspiciousOperation, we have a special event action for that
150 if isinstance(exception, SuspiciousOperation):
151 thread = EventNewThread(
152 EventAction.SUSPICIOUS_REQUEST,
153 request,
154 message=str(exception),
155 )
156 thread.run()
157 elif before_send({}, {"exc_info": (None, exception, None)}) is not None:
158 thread = EventNewThread(
159 EventAction.SYSTEM_EXCEPTION,
160 request,
161 message=exception_to_string(exception),
162 )
163 thread.run()
164
165 @staticmethod
166 def post_save_handler(
167 user: User, request: HttpRequest, sender, instance: Model, created: bool, **_
168 ):
169 """Signal handler for all object's post_save"""
170 if not should_log_model(instance):
171 return
172
173 action = EventAction.MODEL_CREATED if created else EventAction.MODEL_UPDATED
174 EventNewThread(action, request, user=user, model=model_to_dict(instance)).run()
175
176 @staticmethod
177 def pre_delete_handler(user: User, request: HttpRequest, sender, instance: Model, **_):
178 """Signal handler for all object's pre_delete"""
179 if not should_log_model(instance): # pragma: no cover
180 return
181
182 EventNewThread(
183 EventAction.MODEL_DELETED,
184 request,
185 user=user,
186 model=model_to_dict(instance),
187 ).run()
188
189 @staticmethod
190 def m2m_changed_handler(
191 user: User, request: HttpRequest, sender, instance: Model, action: str, **_
192 ):
193 """Signal handler for all object's m2m_changed"""
194 if action not in ["pre_add", "pre_remove", "post_clear"]:
195 return
196 if not should_log_m2m(instance):
197 return
198
199 EventNewThread(
200 EventAction.MODEL_UPDATED,
201 request,
202 user=user,
203 model=model_to_dict(instance),
204 ).run()
205
[end of authentik/events/middleware.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/authentik/events/middleware.py b/authentik/events/middleware.py
--- a/authentik/events/middleware.py
+++ b/authentik/events/middleware.py
@@ -93,21 +93,30 @@
of models"""
get_response: Callable[[HttpRequest], HttpResponse]
+ anonymous_user: User = None
def __init__(self, get_response: Callable[[HttpRequest], HttpResponse]):
self.get_response = get_response
+ def _ensure_fallback_user(self):
+ """Defer fetching anonymous user until we have to"""
+ if self.anonymous_user:
+ return
+ from guardian.shortcuts import get_anonymous_user
+
+ self.anonymous_user = get_anonymous_user()
+
def connect(self, request: HttpRequest):
"""Connect signal for automatic logging"""
- if not hasattr(request, "user"):
- return
- if not getattr(request.user, "is_authenticated", False):
- return
+ self._ensure_fallback_user()
+ user = getattr(request, "user", self.anonymous_user)
+ if not user.is_authenticated:
+ user = self.anonymous_user
if not hasattr(request, "request_id"):
return
- post_save_handler = partial(self.post_save_handler, user=request.user, request=request)
- pre_delete_handler = partial(self.pre_delete_handler, user=request.user, request=request)
- m2m_changed_handler = partial(self.m2m_changed_handler, user=request.user, request=request)
+ post_save_handler = partial(self.post_save_handler, user=user, request=request)
+ pre_delete_handler = partial(self.pre_delete_handler, user=user, request=request)
+ m2m_changed_handler = partial(self.m2m_changed_handler, user=user, request=request)
post_save.connect(
post_save_handler,
dispatch_uid=request.request_id,
| {"golden_diff": "diff --git a/authentik/events/middleware.py b/authentik/events/middleware.py\n--- a/authentik/events/middleware.py\n+++ b/authentik/events/middleware.py\n@@ -93,21 +93,30 @@\n of models\"\"\"\n \n get_response: Callable[[HttpRequest], HttpResponse]\n+ anonymous_user: User = None\n \n def __init__(self, get_response: Callable[[HttpRequest], HttpResponse]):\n self.get_response = get_response\n \n+ def _ensure_fallback_user(self):\n+ \"\"\"Defer fetching anonymous user until we have to\"\"\"\n+ if self.anonymous_user:\n+ return\n+ from guardian.shortcuts import get_anonymous_user\n+\n+ self.anonymous_user = get_anonymous_user()\n+\n def connect(self, request: HttpRequest):\n \"\"\"Connect signal for automatic logging\"\"\"\n- if not hasattr(request, \"user\"):\n- return\n- if not getattr(request.user, \"is_authenticated\", False):\n- return\n+ self._ensure_fallback_user()\n+ user = getattr(request, \"user\", self.anonymous_user)\n+ if not user.is_authenticated:\n+ user = self.anonymous_user\n if not hasattr(request, \"request_id\"):\n return\n- post_save_handler = partial(self.post_save_handler, user=request.user, request=request)\n- pre_delete_handler = partial(self.pre_delete_handler, user=request.user, request=request)\n- m2m_changed_handler = partial(self.m2m_changed_handler, user=request.user, request=request)\n+ post_save_handler = partial(self.post_save_handler, user=user, request=request)\n+ pre_delete_handler = partial(self.pre_delete_handler, user=user, request=request)\n+ m2m_changed_handler = partial(self.m2m_changed_handler, user=user, request=request)\n post_save.connect(\n post_save_handler,\n dispatch_uid=request.request_id,\n", "issue": "User write stages do not trigger a Model Updated event when writing to an existing user\n**Describe the bug**\r\nUser write stages do not trigger a \"Model updated\" event when the user is existing if the stage is after an email stage with \"activate pending user on success\".\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Create a new enrollment flow\r\n2. Add a prompt stage to enter username email and name\r\n3. Do a user write, make sure it always creates a new user and store the user as inactive\r\n4. Add an email stage, and enable \"Activate pending user on success\"\r\n5. Add a prompt stage to set the password\r\n6. Do another user write, make sure it never creates a user\r\n7. 
Do a log in\r\n\r\nDuring this process there is no Model Updated event sent in the second user write.\r\nHowever, when you remove the email stage, it will trigger this event.\r\nIt also triggers the correct event in the user-write on a user settings flow for example, so it seems to be related to the activation stage.\r\n\r\n**Expected behavior**\r\nA user write should trigger a Model Updated event when modifying an existing user, as it also triggers a Model Created event when a new user is written to.\r\n\r\n**Version and Deployment (please complete the following information):**\r\n - authentik version: 2023.1.2\r\n\n", "before_files": [{"content": "\"\"\"Events middleware\"\"\"\nfrom functools import partial\nfrom threading import Thread\nfrom typing import Any, Callable, Optional\n\nfrom django.conf import settings\nfrom django.contrib.sessions.models import Session\nfrom django.core.exceptions import SuspiciousOperation\nfrom django.db.models import Model\nfrom django.db.models.signals import m2m_changed, post_save, pre_delete\nfrom django.http import HttpRequest, HttpResponse\nfrom guardian.models import UserObjectPermission\n\nfrom authentik.core.models import (\n AuthenticatedSession,\n Group,\n PropertyMapping,\n Provider,\n Source,\n User,\n UserSourceConnection,\n)\nfrom authentik.events.models import Event, EventAction, Notification\nfrom authentik.events.utils import model_to_dict\nfrom authentik.flows.models import FlowToken, Stage\nfrom authentik.lib.sentry import before_send\nfrom authentik.lib.utils.errors import exception_to_string\nfrom authentik.outposts.models import OutpostServiceConnection\nfrom authentik.policies.models import Policy, PolicyBindingModel\nfrom authentik.providers.oauth2.models import AccessToken, AuthorizationCode, RefreshToken\nfrom authentik.providers.scim.models import SCIMGroup, SCIMUser\nfrom authentik.stages.authenticator_static.models import StaticToken\n\nIGNORED_MODELS = (\n Event,\n Notification,\n UserObjectPermission,\n AuthenticatedSession,\n StaticToken,\n Session,\n FlowToken,\n Provider,\n Source,\n PropertyMapping,\n UserSourceConnection,\n Stage,\n OutpostServiceConnection,\n Policy,\n PolicyBindingModel,\n AuthorizationCode,\n AccessToken,\n RefreshToken,\n SCIMUser,\n SCIMGroup,\n)\n\n\ndef should_log_model(model: Model) -> bool:\n \"\"\"Return true if operation on `model` should be logged\"\"\"\n if model.__module__.startswith(\"silk\"):\n return False\n return model.__class__ not in IGNORED_MODELS\n\n\ndef should_log_m2m(model: Model) -> bool:\n \"\"\"Return true if m2m operation should be logged\"\"\"\n if model.__class__ in [User, Group]:\n return True\n return False\n\n\nclass EventNewThread(Thread):\n \"\"\"Create Event in background thread\"\"\"\n\n action: str\n request: HttpRequest\n kwargs: dict[str, Any]\n user: Optional[User] = None\n\n def __init__(self, action: str, request: HttpRequest, user: Optional[User] = None, **kwargs):\n super().__init__()\n self.action = action\n self.request = request\n self.user = user\n self.kwargs = kwargs\n\n def run(self):\n Event.new(self.action, **self.kwargs).from_http(self.request, user=self.user)\n\n\nclass AuditMiddleware:\n \"\"\"Register handlers for duration of request-response that log creation/update/deletion\n of models\"\"\"\n\n get_response: Callable[[HttpRequest], HttpResponse]\n\n def __init__(self, get_response: Callable[[HttpRequest], HttpResponse]):\n self.get_response = get_response\n\n def connect(self, request: HttpRequest):\n \"\"\"Connect signal for automatic 
logging\"\"\"\n if not hasattr(request, \"user\"):\n return\n if not getattr(request.user, \"is_authenticated\", False):\n return\n if not hasattr(request, \"request_id\"):\n return\n post_save_handler = partial(self.post_save_handler, user=request.user, request=request)\n pre_delete_handler = partial(self.pre_delete_handler, user=request.user, request=request)\n m2m_changed_handler = partial(self.m2m_changed_handler, user=request.user, request=request)\n post_save.connect(\n post_save_handler,\n dispatch_uid=request.request_id,\n weak=False,\n )\n pre_delete.connect(\n pre_delete_handler,\n dispatch_uid=request.request_id,\n weak=False,\n )\n m2m_changed.connect(\n m2m_changed_handler,\n dispatch_uid=request.request_id,\n weak=False,\n )\n\n def disconnect(self, request: HttpRequest):\n \"\"\"Disconnect signals\"\"\"\n if not hasattr(request, \"request_id\"):\n return\n post_save.disconnect(dispatch_uid=request.request_id)\n pre_delete.disconnect(dispatch_uid=request.request_id)\n m2m_changed.disconnect(dispatch_uid=request.request_id)\n\n def __call__(self, request: HttpRequest) -> HttpResponse:\n self.connect(request)\n\n response = self.get_response(request)\n\n self.disconnect(request)\n return response\n\n def process_exception(self, request: HttpRequest, exception: Exception):\n \"\"\"Disconnect handlers in case of exception\"\"\"\n self.disconnect(request)\n\n if settings.DEBUG:\n return\n # Special case for SuspiciousOperation, we have a special event action for that\n if isinstance(exception, SuspiciousOperation):\n thread = EventNewThread(\n EventAction.SUSPICIOUS_REQUEST,\n request,\n message=str(exception),\n )\n thread.run()\n elif before_send({}, {\"exc_info\": (None, exception, None)}) is not None:\n thread = EventNewThread(\n EventAction.SYSTEM_EXCEPTION,\n request,\n message=exception_to_string(exception),\n )\n thread.run()\n\n @staticmethod\n def post_save_handler(\n user: User, request: HttpRequest, sender, instance: Model, created: bool, **_\n ):\n \"\"\"Signal handler for all object's post_save\"\"\"\n if not should_log_model(instance):\n return\n\n action = EventAction.MODEL_CREATED if created else EventAction.MODEL_UPDATED\n EventNewThread(action, request, user=user, model=model_to_dict(instance)).run()\n\n @staticmethod\n def pre_delete_handler(user: User, request: HttpRequest, sender, instance: Model, **_):\n \"\"\"Signal handler for all object's pre_delete\"\"\"\n if not should_log_model(instance): # pragma: no cover\n return\n\n EventNewThread(\n EventAction.MODEL_DELETED,\n request,\n user=user,\n model=model_to_dict(instance),\n ).run()\n\n @staticmethod\n def m2m_changed_handler(\n user: User, request: HttpRequest, sender, instance: Model, action: str, **_\n ):\n \"\"\"Signal handler for all object's m2m_changed\"\"\"\n if action not in [\"pre_add\", \"pre_remove\", \"post_clear\"]:\n return\n if not should_log_m2m(instance):\n return\n\n EventNewThread(\n EventAction.MODEL_UPDATED,\n request,\n user=user,\n model=model_to_dict(instance),\n ).run()\n", "path": "authentik/events/middleware.py"}]} | 2,691 | 401 |
gh_patches_debug_6542 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-3071 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update to use new version of Hologram
As an extension of https://github.com/fishtown-analytics/hologram/issues/40 -- support Mashumaro in Hologram -- this makes changes to pull in version 0.0.13 of Hologram.
</issue>
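Since Hologram 0.0.13 is the release that adds the Mashumaro support, the change itself is small: re-enable and bump the pin in `install_requires` of `core/setup.py`. A sketch of the affected entries (surrounding pins unchanged):
```python
# core/setup.py -- sketch of the relevant install_requires entries
install_requires = [
    # ...
    'dataclasses==0.6;python_version<"3.7"',
    'hologram==0.0.13',  # previously commented out as 'hologram==0.0.12'
    'logbook>=1.5,<1.6',
    # ...
]
```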
<code>
[start of core/setup.py]
1 #!/usr/bin/env python
2 import os
3 import sys
4
5 if sys.version_info < (3, 6):
6 print('Error: dbt does not support this version of Python.')
7 print('Please upgrade to Python 3.6 or higher.')
8 sys.exit(1)
9
10
11 from setuptools import setup
12 try:
13 from setuptools import find_namespace_packages
14 except ImportError:
15 # the user has a downlevel version of setuptools.
16 print('Error: dbt requires setuptools v40.1.0 or higher.')
17 print('Please upgrade setuptools with "pip install --upgrade setuptools" '
18 'and try again')
19 sys.exit(1)
20
21
22 def read(fname):
23 return open(os.path.join(os.path.dirname(__file__), fname)).read()
24
25
26 package_name = "dbt-core"
27 package_version = "0.19.0"
28 description = """dbt (data build tool) is a command line tool that helps \
29 analysts and engineers transform data in their warehouse more effectively"""
30
31
32 setup(
33 name=package_name,
34 version=package_version,
35 description=description,
36 long_description=description,
37 author="Fishtown Analytics",
38 author_email="[email protected]",
39 url="https://github.com/fishtown-analytics/dbt",
40 packages=find_namespace_packages(include=['dbt', 'dbt.*']),
41 package_data={
42 'dbt': [
43 'include/index.html',
44 'include/global_project/dbt_project.yml',
45 'include/global_project/docs/*.md',
46 'include/global_project/macros/*.sql',
47 'include/global_project/macros/**/*.sql',
48 'include/global_project/macros/**/**/*.sql',
49 'py.typed',
50 ]
51 },
52 test_suite='test',
53 entry_points={
54 'console_scripts': [
55 'dbt = dbt.main:main',
56 ],
57 },
58 scripts=[
59 'scripts/dbt',
60 ],
61 install_requires=[
62 'Jinja2==2.11.2',
63 'PyYAML>=3.11',
64 'sqlparse>=0.2.3,<0.4',
65 'networkx>=2.3,<3',
66 'minimal-snowplow-tracker==0.0.2',
67 'colorama>=0.3.9,<0.4.4',
68 'agate>=1.6,<2',
69 'isodate>=0.6,<0.7',
70 'json-rpc>=1.12,<2',
71 'werkzeug>=0.15,<2.0',
72 'dataclasses==0.6;python_version<"3.7"',
73 # 'hologram==0.0.12', # must be updated prior to release
74 'logbook>=1.5,<1.6',
75 'typing-extensions>=3.7.4,<3.8',
76 # the following are all to match snowflake-connector-python
77 'requests>=2.18.0,<2.24.0',
78 'idna<2.10',
79 'cffi>=1.9,<1.15',
80 ],
81 zip_safe=False,
82 classifiers=[
83 'Development Status :: 5 - Production/Stable',
84
85 'License :: OSI Approved :: Apache Software License',
86
87 'Operating System :: Microsoft :: Windows',
88 'Operating System :: MacOS :: MacOS X',
89 'Operating System :: POSIX :: Linux',
90
91 'Programming Language :: Python :: 3.6',
92 'Programming Language :: Python :: 3.7',
93 'Programming Language :: Python :: 3.8',
94 'Programming Language :: Python :: 3.9',
95 ],
96 python_requires=">=3.6.3",
97 )
98
[end of core/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/setup.py b/core/setup.py
--- a/core/setup.py
+++ b/core/setup.py
@@ -70,7 +70,7 @@
'json-rpc>=1.12,<2',
'werkzeug>=0.15,<2.0',
'dataclasses==0.6;python_version<"3.7"',
- # 'hologram==0.0.12', # must be updated prior to release
+ 'hologram==0.0.13',
'logbook>=1.5,<1.6',
'typing-extensions>=3.7.4,<3.8',
# the following are all to match snowflake-connector-python
| {"golden_diff": "diff --git a/core/setup.py b/core/setup.py\n--- a/core/setup.py\n+++ b/core/setup.py\n@@ -70,7 +70,7 @@\n 'json-rpc>=1.12,<2',\n 'werkzeug>=0.15,<2.0',\n 'dataclasses==0.6;python_version<\"3.7\"',\n- # 'hologram==0.0.12', # must be updated prior to release\n+ 'hologram==0.0.13',\n 'logbook>=1.5,<1.6',\n 'typing-extensions>=3.7.4,<3.8',\n # the following are all to match snowflake-connector-python\n", "issue": "Update to use new version of Hologram\nAs an extension of https://github.com/fishtown-analytics/hologram/issues/40 -- support Mashumaro in Hologram -- makes changes to pull in version 0.0.13 of Hologram. \n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 6):\n print('Error: dbt does not support this version of Python.')\n print('Please upgrade to Python 3.6 or higher.')\n sys.exit(1)\n\n\nfrom setuptools import setup\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print('Error: dbt requires setuptools v40.1.0 or higher.')\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" '\n 'and try again')\n sys.exit(1)\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"0.19.0\"\ndescription = \"\"\"dbt (data build tool) is a command line tool that helps \\\nanalysts and engineers transform data in their warehouse more effectively\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=description,\n author=\"Fishtown Analytics\",\n author_email=\"[email protected]\",\n url=\"https://github.com/fishtown-analytics/dbt\",\n packages=find_namespace_packages(include=['dbt', 'dbt.*']),\n package_data={\n 'dbt': [\n 'include/index.html',\n 'include/global_project/dbt_project.yml',\n 'include/global_project/docs/*.md',\n 'include/global_project/macros/*.sql',\n 'include/global_project/macros/**/*.sql',\n 'include/global_project/macros/**/**/*.sql',\n 'py.typed',\n ]\n },\n test_suite='test',\n entry_points={\n 'console_scripts': [\n 'dbt = dbt.main:main',\n ],\n },\n scripts=[\n 'scripts/dbt',\n ],\n install_requires=[\n 'Jinja2==2.11.2',\n 'PyYAML>=3.11',\n 'sqlparse>=0.2.3,<0.4',\n 'networkx>=2.3,<3',\n 'minimal-snowplow-tracker==0.0.2',\n 'colorama>=0.3.9,<0.4.4',\n 'agate>=1.6,<2',\n 'isodate>=0.6,<0.7',\n 'json-rpc>=1.12,<2',\n 'werkzeug>=0.15,<2.0',\n 'dataclasses==0.6;python_version<\"3.7\"',\n # 'hologram==0.0.12', # must be updated prior to release\n 'logbook>=1.5,<1.6',\n 'typing-extensions>=3.7.4,<3.8',\n # the following are all to match snowflake-connector-python\n 'requests>=2.18.0,<2.24.0',\n 'idna<2.10',\n 'cffi>=1.9,<1.15',\n ],\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n\n 'License :: OSI Approved :: Apache Software License',\n\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n python_requires=\">=3.6.3\",\n)\n", "path": "core/setup.py"}]} | 1,570 | 159 |
gh_patches_debug_35137 | rasdani/github-patches | git_diff | LibraryOfCongress__concordia-354 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MetricsModelMixin is missing from several models
</issue>
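The models file already shows the intended pattern: `UserProfile` and `Campaign` mix in `MetricsModelMixin("<name>")` ahead of `models.Model`. The fix is to apply the same mixin to the remaining models (`Project`, `Item`, `Asset`, `Tag`, `UserAssetTagCollection`, `Transcription`). A sketch for one of them:
```python
# Sketch: the same mixin pattern used by Campaign, applied to Project.
from django.db import models
from django_prometheus_metrics.models import MetricsModelMixin


class Project(MetricsModelMixin("project"), models.Model):
    title = models.CharField(max_length=80)
    slug = models.SlugField(max_length=80)
    # ... remaining fields stay exactly as before ...
```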
<code>
[start of concordia/models.py]
1 from logging import getLogger
2
3 from django.conf import settings
4 from django.contrib.auth.models import User
5 from django.contrib.postgres.fields import JSONField
6 from django.core.validators import RegexValidator
7 from django.db import models
8 from django.urls import reverse
9 from django.utils import timezone
10 from django_prometheus_metrics.models import MetricsModelMixin
11
12
13 logger = getLogger(__name__)
14
15 metadata_default = dict
16
17 User._meta.get_field("email").__dict__["_unique"] = True
18
19
20 class UserProfile(MetricsModelMixin("userprofile"), models.Model):
21 user = models.ForeignKey(User, on_delete=models.CASCADE)
22
23
24 class Status:
25 # FIXME: determine whether this is actually universally applicable to all of
26 # our models or should be split into subsets
27 EDIT = "Edit"
28 SUBMITTED = "Submitted"
29 COMPLETED = "Completed"
30 INACTIVE = "Inactive"
31 ACTIVE = "Active"
32
33 DEFAULT = EDIT
34 CHOICES = (
35 (EDIT, "Open for Edit"),
36 (SUBMITTED, "Submitted for Review"),
37 (COMPLETED, "Transcription Completed"),
38 (INACTIVE, "Inactive"),
39 (ACTIVE, "Active"),
40 )
41
42 #: Convenience lookup dictionary for CHOICES:
43 CHOICE_MAP = dict(CHOICES)
44
45
46 class MediaType:
47 IMAGE = "IMG"
48 AUDIO = "AUD"
49 VIDEO = "VID"
50
51 CHOICES = ((IMAGE, "Image"), (AUDIO, "Audio"), (VIDEO, "Video"))
52
53
54 class PublicationManager(models.Manager):
55 def published(self):
56 return self.get_queryset().filter(published=True)
57
58 def unpublished(self):
59 return self.get_queryset().filter(published=False)
60
61
62 class Campaign(MetricsModelMixin("campaign"), models.Model):
63 objects = PublicationManager()
64
65 published = models.BooleanField(default=False, blank=True)
66 status = models.CharField(
67 max_length=10, choices=Status.CHOICES, default=Status.DEFAULT
68 )
69
70 title = models.CharField(max_length=80)
71 slug = models.SlugField(max_length=80, unique=True)
72 description = models.TextField(blank=True)
73
74 start_date = models.DateTimeField(null=True, blank=True)
75 end_date = models.DateTimeField(null=True, blank=True)
76
77 metadata = JSONField(default=metadata_default, blank=True, null=True)
78
79 def __str__(self):
80 return self.title
81
82 def get_absolute_url(self):
83 # FIXME: change this with https://github.com/LibraryOfCongress/concordia/issues/242
84 return reverse("transcriptions:campaign", args=(self.slug,))
85
86
87 class Project(models.Model):
88 objects = PublicationManager()
89
90 campaign = models.ForeignKey(Campaign, on_delete=models.CASCADE)
91 title = models.CharField(max_length=80)
92 slug = models.SlugField(max_length=80)
93
94 category = models.CharField(max_length=12, blank=True)
95 metadata = JSONField(default=metadata_default, blank=True, null=True)
96 status = models.CharField(
97 max_length=10, choices=Status.CHOICES, default=Status.DEFAULT
98 )
99 published = models.BooleanField(default=False, blank=True)
100
101 class Meta:
102 unique_together = (("slug", "campaign"),)
103 ordering = ["title"]
104
105 def __str__(self):
106 return self.title
107
108 def get_absolute_url(self):
109 return reverse(
110 "transcriptions:project-detail",
111 kwargs={"campaign_slug": self.campaign.slug, "slug": self.slug},
112 )
113
114
115 class Item(models.Model):
116 objects = PublicationManager()
117
118 project = models.ForeignKey(
119 Project, on_delete=models.CASCADE, blank=True, null=True
120 )
121
122 published = models.BooleanField(default=False, blank=True)
123
124 title = models.CharField(max_length=300)
125 item_url = models.URLField(max_length=255)
126 item_id = models.CharField(
127 max_length=100, help_text="Unique item ID assigned by the upstream source"
128 )
129 description = models.TextField(blank=True)
130 metadata = JSONField(
131 default=metadata_default,
132 blank=True,
133 null=True,
134 help_text="Raw metadata returned by the remote API",
135 )
136 thumbnail_url = models.URLField(max_length=255, blank=True, null=True)
137 status = models.CharField(
138 max_length=10, choices=Status.CHOICES, default=Status.DEFAULT
139 )
140
141 class Meta:
142 unique_together = (("item_id", "project"),)
143
144 def __str__(self):
145 return f"{self.item_id}: {self.title}"
146
147 def get_absolute_url(self):
148 return reverse(
149 "transcriptions:item",
150 kwargs={
151 "campaign_slug": self.project.campaign.slug,
152 "project_slug": self.project.slug,
153 "item_id": self.item_id,
154 },
155 )
156
157
158 class Asset(models.Model):
159 item = models.ForeignKey(Item, on_delete=models.CASCADE)
160
161 title = models.CharField(max_length=100)
162 slug = models.SlugField(max_length=100)
163
164 description = models.TextField(blank=True)
165 # TODO: do we really need this given that we import in lock-step sequence
166 # numbers with a fixed extension?
167 media_url = models.TextField("Path component of the URL", max_length=255)
168 media_type = models.CharField(
169 max_length=4, choices=MediaType.CHOICES, db_index=True
170 )
171 sequence = models.PositiveIntegerField(default=1)
172
173 # The original ID of the image resource on loc.gov
174 resource_id = models.CharField(max_length=100, blank=True, null=True)
175 # The URL used to download this image from loc.gov
176 download_url = models.CharField(max_length=255, blank=True, null=True)
177
178 metadata = JSONField(default=metadata_default, blank=True, null=True)
179 status = models.CharField(
180 max_length=10, choices=Status.CHOICES, default=Status.DEFAULT
181 )
182
183 class Meta:
184 unique_together = (("slug", "item"),)
185 ordering = ["title", "sequence"]
186
187 def __str__(self):
188 return self.title
189
190 def get_absolute_url(self):
191 return reverse(
192 "transcriptions:asset-detail",
193 kwargs={
194 "campaign_slug": self.item.project.campaign.slug,
195 "project_slug": self.item.project.slug,
196 "item_id": self.item.item_id,
197 "slug": self.slug,
198 },
199 )
200
201
202 class Tag(models.Model):
203 TAG_VALIDATOR = RegexValidator(r"^[- _'\w]{1,50}$")
204 value = models.CharField(max_length=50, validators=[TAG_VALIDATOR])
205
206 def __str__(self):
207 return self.value
208
209
210 class UserAssetTagCollection(models.Model):
211 asset = models.ForeignKey(Asset, on_delete=models.CASCADE)
212
213 user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
214
215 tags = models.ManyToManyField(Tag, blank=True)
216 created_on = models.DateTimeField(auto_now_add=True)
217 updated_on = models.DateTimeField(auto_now=True)
218
219 def __str__(self):
220 return "{} - {}".format(self.asset, self.user)
221
222
223 class Transcription(models.Model):
224 asset = models.ForeignKey(Asset, on_delete=models.CASCADE)
225
226 user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
227
228 text = models.TextField(blank=True)
229 status = models.CharField(
230 max_length=10, choices=Status.CHOICES, default=Status.DEFAULT
231 )
232
233 created_on = models.DateTimeField(auto_now_add=True)
234 updated_on = models.DateTimeField(auto_now=True)
235
236 def __str__(self):
237 return str(self.asset)
238
239
240 class AssetTranscriptionReservation(models.Model):
241 """
242 Records a user's reservation to transcribe a particular asset
243 """
244
245 asset = models.OneToOneField(Asset, on_delete=models.CASCADE)
246 user = models.ForeignKey(User, on_delete=models.CASCADE)
247
248 created_on = models.DateTimeField(editable=False, auto_now_add=True)
249 updated_on = models.DateTimeField(auto_now=True)
250
[end of concordia/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/concordia/models.py b/concordia/models.py
--- a/concordia/models.py
+++ b/concordia/models.py
@@ -84,7 +84,7 @@
return reverse("transcriptions:campaign", args=(self.slug,))
-class Project(models.Model):
+class Project(MetricsModelMixin("project"), models.Model):
objects = PublicationManager()
campaign = models.ForeignKey(Campaign, on_delete=models.CASCADE)
@@ -112,7 +112,7 @@
)
-class Item(models.Model):
+class Item(MetricsModelMixin("item"), models.Model):
objects = PublicationManager()
project = models.ForeignKey(
@@ -155,7 +155,7 @@
)
-class Asset(models.Model):
+class Asset(MetricsModelMixin("asset"), models.Model):
item = models.ForeignKey(Item, on_delete=models.CASCADE)
title = models.CharField(max_length=100)
@@ -199,7 +199,7 @@
)
-class Tag(models.Model):
+class Tag(MetricsModelMixin("tag"), models.Model):
TAG_VALIDATOR = RegexValidator(r"^[- _'\w]{1,50}$")
value = models.CharField(max_length=50, validators=[TAG_VALIDATOR])
@@ -207,7 +207,9 @@
return self.value
-class UserAssetTagCollection(models.Model):
+class UserAssetTagCollection(
+ MetricsModelMixin("user_asset_tag_collection"), models.Model
+):
asset = models.ForeignKey(Asset, on_delete=models.CASCADE)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
@@ -220,7 +222,7 @@
return "{} - {}".format(self.asset, self.user)
-class Transcription(models.Model):
+class Transcription(MetricsModelMixin("transcription"), models.Model):
asset = models.ForeignKey(Asset, on_delete=models.CASCADE)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
| {"golden_diff": "diff --git a/concordia/models.py b/concordia/models.py\n--- a/concordia/models.py\n+++ b/concordia/models.py\n@@ -84,7 +84,7 @@\n return reverse(\"transcriptions:campaign\", args=(self.slug,))\n \n \n-class Project(models.Model):\n+class Project(MetricsModelMixin(\"project\"), models.Model):\n objects = PublicationManager()\n \n campaign = models.ForeignKey(Campaign, on_delete=models.CASCADE)\n@@ -112,7 +112,7 @@\n )\n \n \n-class Item(models.Model):\n+class Item(MetricsModelMixin(\"item\"), models.Model):\n objects = PublicationManager()\n \n project = models.ForeignKey(\n@@ -155,7 +155,7 @@\n )\n \n \n-class Asset(models.Model):\n+class Asset(MetricsModelMixin(\"asset\"), models.Model):\n item = models.ForeignKey(Item, on_delete=models.CASCADE)\n \n title = models.CharField(max_length=100)\n@@ -199,7 +199,7 @@\n )\n \n \n-class Tag(models.Model):\n+class Tag(MetricsModelMixin(\"tag\"), models.Model):\n TAG_VALIDATOR = RegexValidator(r\"^[- _'\\w]{1,50}$\")\n value = models.CharField(max_length=50, validators=[TAG_VALIDATOR])\n \n@@ -207,7 +207,9 @@\n return self.value\n \n \n-class UserAssetTagCollection(models.Model):\n+class UserAssetTagCollection(\n+ MetricsModelMixin(\"user_asset_tag_collection\"), models.Model\n+):\n asset = models.ForeignKey(Asset, on_delete=models.CASCADE)\n \n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n@@ -220,7 +222,7 @@\n return \"{} - {}\".format(self.asset, self.user)\n \n \n-class Transcription(models.Model):\n+class Transcription(MetricsModelMixin(\"transcription\"), models.Model):\n asset = models.ForeignKey(Asset, on_delete=models.CASCADE)\n \n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n", "issue": "MetricsModelMixin is missing from several models\n\n", "before_files": [{"content": "from logging import getLogger\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.contrib.postgres.fields import JSONField\nfrom django.core.validators import RegexValidator\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django_prometheus_metrics.models import MetricsModelMixin\n\n\nlogger = getLogger(__name__)\n\nmetadata_default = dict\n\nUser._meta.get_field(\"email\").__dict__[\"_unique\"] = True\n\n\nclass UserProfile(MetricsModelMixin(\"userprofile\"), models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n\n\nclass Status:\n # FIXME: determine whether this is actually universally applicable to all of\n # our models or should be split into subsets\n EDIT = \"Edit\"\n SUBMITTED = \"Submitted\"\n COMPLETED = \"Completed\"\n INACTIVE = \"Inactive\"\n ACTIVE = \"Active\"\n\n DEFAULT = EDIT\n CHOICES = (\n (EDIT, \"Open for Edit\"),\n (SUBMITTED, \"Submitted for Review\"),\n (COMPLETED, \"Transcription Completed\"),\n (INACTIVE, \"Inactive\"),\n (ACTIVE, \"Active\"),\n )\n\n #: Convenience lookup dictionary for CHOICES:\n CHOICE_MAP = dict(CHOICES)\n\n\nclass MediaType:\n IMAGE = \"IMG\"\n AUDIO = \"AUD\"\n VIDEO = \"VID\"\n\n CHOICES = ((IMAGE, \"Image\"), (AUDIO, \"Audio\"), (VIDEO, \"Video\"))\n\n\nclass PublicationManager(models.Manager):\n def published(self):\n return self.get_queryset().filter(published=True)\n\n def unpublished(self):\n return self.get_queryset().filter(published=False)\n\n\nclass Campaign(MetricsModelMixin(\"campaign\"), models.Model):\n objects = PublicationManager()\n\n published = models.BooleanField(default=False, blank=True)\n status = 
models.CharField(\n max_length=10, choices=Status.CHOICES, default=Status.DEFAULT\n )\n\n title = models.CharField(max_length=80)\n slug = models.SlugField(max_length=80, unique=True)\n description = models.TextField(blank=True)\n\n start_date = models.DateTimeField(null=True, blank=True)\n end_date = models.DateTimeField(null=True, blank=True)\n\n metadata = JSONField(default=metadata_default, blank=True, null=True)\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n # FIXME: change this with https://github.com/LibraryOfCongress/concordia/issues/242\n return reverse(\"transcriptions:campaign\", args=(self.slug,))\n\n\nclass Project(models.Model):\n objects = PublicationManager()\n\n campaign = models.ForeignKey(Campaign, on_delete=models.CASCADE)\n title = models.CharField(max_length=80)\n slug = models.SlugField(max_length=80)\n\n category = models.CharField(max_length=12, blank=True)\n metadata = JSONField(default=metadata_default, blank=True, null=True)\n status = models.CharField(\n max_length=10, choices=Status.CHOICES, default=Status.DEFAULT\n )\n published = models.BooleanField(default=False, blank=True)\n\n class Meta:\n unique_together = ((\"slug\", \"campaign\"),)\n ordering = [\"title\"]\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n return reverse(\n \"transcriptions:project-detail\",\n kwargs={\"campaign_slug\": self.campaign.slug, \"slug\": self.slug},\n )\n\n\nclass Item(models.Model):\n objects = PublicationManager()\n\n project = models.ForeignKey(\n Project, on_delete=models.CASCADE, blank=True, null=True\n )\n\n published = models.BooleanField(default=False, blank=True)\n\n title = models.CharField(max_length=300)\n item_url = models.URLField(max_length=255)\n item_id = models.CharField(\n max_length=100, help_text=\"Unique item ID assigned by the upstream source\"\n )\n description = models.TextField(blank=True)\n metadata = JSONField(\n default=metadata_default,\n blank=True,\n null=True,\n help_text=\"Raw metadata returned by the remote API\",\n )\n thumbnail_url = models.URLField(max_length=255, blank=True, null=True)\n status = models.CharField(\n max_length=10, choices=Status.CHOICES, default=Status.DEFAULT\n )\n\n class Meta:\n unique_together = ((\"item_id\", \"project\"),)\n\n def __str__(self):\n return f\"{self.item_id}: {self.title}\"\n\n def get_absolute_url(self):\n return reverse(\n \"transcriptions:item\",\n kwargs={\n \"campaign_slug\": self.project.campaign.slug,\n \"project_slug\": self.project.slug,\n \"item_id\": self.item_id,\n },\n )\n\n\nclass Asset(models.Model):\n item = models.ForeignKey(Item, on_delete=models.CASCADE)\n\n title = models.CharField(max_length=100)\n slug = models.SlugField(max_length=100)\n\n description = models.TextField(blank=True)\n # TODO: do we really need this given that we import in lock-step sequence\n # numbers with a fixed extension?\n media_url = models.TextField(\"Path component of the URL\", max_length=255)\n media_type = models.CharField(\n max_length=4, choices=MediaType.CHOICES, db_index=True\n )\n sequence = models.PositiveIntegerField(default=1)\n\n # The original ID of the image resource on loc.gov\n resource_id = models.CharField(max_length=100, blank=True, null=True)\n # The URL used to download this image from loc.gov\n download_url = models.CharField(max_length=255, blank=True, null=True)\n\n metadata = JSONField(default=metadata_default, blank=True, null=True)\n status = models.CharField(\n max_length=10, choices=Status.CHOICES, 
default=Status.DEFAULT\n )\n\n class Meta:\n unique_together = ((\"slug\", \"item\"),)\n ordering = [\"title\", \"sequence\"]\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n return reverse(\n \"transcriptions:asset-detail\",\n kwargs={\n \"campaign_slug\": self.item.project.campaign.slug,\n \"project_slug\": self.item.project.slug,\n \"item_id\": self.item.item_id,\n \"slug\": self.slug,\n },\n )\n\n\nclass Tag(models.Model):\n TAG_VALIDATOR = RegexValidator(r\"^[- _'\\w]{1,50}$\")\n value = models.CharField(max_length=50, validators=[TAG_VALIDATOR])\n\n def __str__(self):\n return self.value\n\n\nclass UserAssetTagCollection(models.Model):\n asset = models.ForeignKey(Asset, on_delete=models.CASCADE)\n\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n\n tags = models.ManyToManyField(Tag, blank=True)\n created_on = models.DateTimeField(auto_now_add=True)\n updated_on = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return \"{} - {}\".format(self.asset, self.user)\n\n\nclass Transcription(models.Model):\n asset = models.ForeignKey(Asset, on_delete=models.CASCADE)\n\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n\n text = models.TextField(blank=True)\n status = models.CharField(\n max_length=10, choices=Status.CHOICES, default=Status.DEFAULT\n )\n\n created_on = models.DateTimeField(auto_now_add=True)\n updated_on = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return str(self.asset)\n\n\nclass AssetTranscriptionReservation(models.Model):\n \"\"\"\n Records a user's reservation to transcribe a particular asset\n \"\"\"\n\n asset = models.OneToOneField(Asset, on_delete=models.CASCADE)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n\n created_on = models.DateTimeField(editable=False, auto_now_add=True)\n updated_on = models.DateTimeField(auto_now=True)\n", "path": "concordia/models.py"}]} | 2,904 | 435 |
gh_patches_debug_25528 | rasdani/github-patches | git_diff | scrapy__scrapy-2464 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
String value for order of Scrapy component
If Scrapy component order is defined as a string, it leads to undefined behaviour on Python 2 and to the following errors on Python 3:
```
File "/usr/local/lib/python3.5/site-packages/scrapy/middleware.py", line 58, in from_crawler
return cls.from_settings(crawler.settings, crawler)
File "/usr/local/lib/python3.5/site-packages/scrapy/middleware.py", line 29, in from_settings
mwlist = cls._get_mwlist_from_settings(settings)
File "/usr/local/lib/python3.5/site-packages/scrapy/core/spidermw.py", line 21, in _get_mwlist_from_settings
return build_component_list(settings.getwithbase('SPIDER_MIDDLEWARES'))
File "/usr/local/lib/python3.5/site-packages/scrapy/utils/conf.py", line 47, in build_component_list
return [k for k, v in sorted(six.iteritems(compdict), key=itemgetter(1))]
builtins.TypeError: unorderable types: str() < int()
```
My guess is that either 1) the order of a Scrapy component should be of integer type (or `None`) and there should be a check somewhere, or 2) the sorting logic should be fixed.
</issue>
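On Python 3 the `sorted(six.iteritems(compdict), key=itemgetter(1))` call in `build_component_list` compares the order values directly, so a mix of `str` and `int` orders raises `TypeError`; on Python 2 the same comparison silently succeeds with an arbitrary ordering. The fix recorded for this entry takes option 1 and rejects non-numeric orders up front. A standalone sketch of that guard:
```python
# Sketch of the validation applied before sorting component orders.
import numbers


def validate_component_order(compdict):
    """Fail fast if any component order is not a real number or None."""
    for name, value in compdict.items():
        if value is not None and not isinstance(value, numbers.Real):
            raise ValueError(
                'Invalid value {} for component {}, please provide '
                'a real number or None instead'.format(value, name)
            )
```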
<code>
[start of scrapy/utils/conf.py]
1 import os
2 import sys
3 from operator import itemgetter
4
5 import six
6 from six.moves.configparser import SafeConfigParser
7
8 from scrapy.settings import BaseSettings
9 from scrapy.utils.deprecate import update_classpath
10 from scrapy.utils.python import without_none_values
11
12
13 def build_component_list(compdict, custom=None, convert=update_classpath):
14 """Compose a component list from a { class: order } dictionary."""
15
16 def _check_components(complist):
17 if len({convert(c) for c in complist}) != len(complist):
18 raise ValueError('Some paths in {!r} convert to the same object, '
19 'please update your settings'.format(complist))
20
21 def _map_keys(compdict):
22 if isinstance(compdict, BaseSettings):
23 compbs = BaseSettings()
24 for k, v in six.iteritems(compdict):
25 prio = compdict.getpriority(k)
26 if compbs.getpriority(convert(k)) == prio:
27 raise ValueError('Some paths in {!r} convert to the same '
28 'object, please update your settings'
29 ''.format(list(compdict.keys())))
30 else:
31 compbs.set(convert(k), v, priority=prio)
32 return compbs
33 else:
34 _check_components(compdict)
35 return {convert(k): v for k, v in six.iteritems(compdict)}
36
37 # BEGIN Backwards compatibility for old (base, custom) call signature
38 if isinstance(custom, (list, tuple)):
39 _check_components(custom)
40 return type(custom)(convert(c) for c in custom)
41
42 if custom is not None:
43 compdict.update(custom)
44 # END Backwards compatibility
45
46 compdict = without_none_values(_map_keys(compdict))
47 return [k for k, v in sorted(six.iteritems(compdict), key=itemgetter(1))]
48
49
50 def arglist_to_dict(arglist):
51 """Convert a list of arguments like ['arg1=val1', 'arg2=val2', ...] to a
52 dict
53 """
54 return dict(x.split('=', 1) for x in arglist)
55
56
57 def closest_scrapy_cfg(path='.', prevpath=None):
58 """Return the path to the closest scrapy.cfg file by traversing the current
59 directory and its parents
60 """
61 if path == prevpath:
62 return ''
63 path = os.path.abspath(path)
64 cfgfile = os.path.join(path, 'scrapy.cfg')
65 if os.path.exists(cfgfile):
66 return cfgfile
67 return closest_scrapy_cfg(os.path.dirname(path), path)
68
69
70 def init_env(project='default', set_syspath=True):
71 """Initialize environment to use command-line tool from inside a project
72 dir. This sets the Scrapy settings module and modifies the Python path to
73 be able to locate the project module.
74 """
75 cfg = get_config()
76 if cfg.has_option('settings', project):
77 os.environ['SCRAPY_SETTINGS_MODULE'] = cfg.get('settings', project)
78 closest = closest_scrapy_cfg()
79 if closest:
80 projdir = os.path.dirname(closest)
81 if set_syspath and projdir not in sys.path:
82 sys.path.append(projdir)
83
84
85 def get_config(use_closest=True):
86 """Get Scrapy config file as a SafeConfigParser"""
87 sources = get_sources(use_closest)
88 cfg = SafeConfigParser()
89 cfg.read(sources)
90 return cfg
91
92
93 def get_sources(use_closest=True):
94 xdg_config_home = os.environ.get('XDG_CONFIG_HOME') or \
95 os.path.expanduser('~/.config')
96 sources = ['/etc/scrapy.cfg', r'c:\scrapy\scrapy.cfg',
97 xdg_config_home + '/scrapy.cfg',
98 os.path.expanduser('~/.scrapy.cfg')]
99 if use_closest:
100 sources.append(closest_scrapy_cfg())
101 return sources
102
[end of scrapy/utils/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/utils/conf.py b/scrapy/utils/conf.py
--- a/scrapy/utils/conf.py
+++ b/scrapy/utils/conf.py
@@ -1,5 +1,6 @@
import os
import sys
+import numbers
from operator import itemgetter
import six
@@ -34,6 +35,13 @@
_check_components(compdict)
return {convert(k): v for k, v in six.iteritems(compdict)}
+ def _validate_values(compdict):
+ """Fail if a value in the components dict is not a real number or None."""
+ for name, value in six.iteritems(compdict):
+ if value is not None and not isinstance(value, numbers.Real):
+ raise ValueError('Invalid value {} for component {}, please provide ' \
+ 'a real number or None instead'.format(value, name))
+
# BEGIN Backwards compatibility for old (base, custom) call signature
if isinstance(custom, (list, tuple)):
_check_components(custom)
@@ -43,6 +51,7 @@
compdict.update(custom)
# END Backwards compatibility
+ _validate_values(compdict)
compdict = without_none_values(_map_keys(compdict))
return [k for k, v in sorted(six.iteritems(compdict), key=itemgetter(1))]
| {"golden_diff": "diff --git a/scrapy/utils/conf.py b/scrapy/utils/conf.py\n--- a/scrapy/utils/conf.py\n+++ b/scrapy/utils/conf.py\n@@ -1,5 +1,6 @@\n import os\n import sys\n+import numbers\n from operator import itemgetter\n \n import six\n@@ -34,6 +35,13 @@\n _check_components(compdict)\n return {convert(k): v for k, v in six.iteritems(compdict)}\n \n+ def _validate_values(compdict):\n+ \"\"\"Fail if a value in the components dict is not a real number or None.\"\"\"\n+ for name, value in six.iteritems(compdict):\n+ if value is not None and not isinstance(value, numbers.Real):\n+ raise ValueError('Invalid value {} for component {}, please provide ' \\\n+ 'a real number or None instead'.format(value, name))\n+\n # BEGIN Backwards compatibility for old (base, custom) call signature\n if isinstance(custom, (list, tuple)):\n _check_components(custom)\n@@ -43,6 +51,7 @@\n compdict.update(custom)\n # END Backwards compatibility\n \n+ _validate_values(compdict)\n compdict = without_none_values(_map_keys(compdict))\n return [k for k, v in sorted(six.iteritems(compdict), key=itemgetter(1))]\n", "issue": "String value for order of Scrapy component\nIf Scrapy component order is defined as a string, it leads to undefined behaviour on Python 2 and to the following errors on Python 3:\r\n```\r\nFile \"/usr/local/lib/python3.5/site-packages/scrapy/middleware.py\", line 58, in from_crawler\r\n return cls.from_settings(crawler.settings, crawler)\r\nFile \"/usr/local/lib/python3.5/site-packages/scrapy/middleware.py\", line 29, in from_settings\r\n mwlist = cls._get_mwlist_from_settings(settings)\r\nFile \"/usr/local/lib/python3.5/site-packages/scrapy/core/spidermw.py\", line 21, in _get_mwlist_from_settings\r\n return build_component_list(settings.getwithbase('SPIDER_MIDDLEWARES'))\r\nFile \"/usr/local/lib/python3.5/site-packages/scrapy/utils/conf.py\", line 47, in build_component_list\r\n return [k for k, v in sorted(six.iteritems(compdict), key=itemgetter(1))]\r\nbuiltins.TypeError: unorderable types: str() < int()\r\n```\r\n\r\nMy guess that 1) order of a Scrapy component should be stated as of integer type (or `None`) and there should be a check somewhere, 2) or the sorting logic should be fixed.\n", "before_files": [{"content": "import os\nimport sys\nfrom operator import itemgetter\n\nimport six\nfrom six.moves.configparser import SafeConfigParser\n\nfrom scrapy.settings import BaseSettings\nfrom scrapy.utils.deprecate import update_classpath\nfrom scrapy.utils.python import without_none_values\n\n\ndef build_component_list(compdict, custom=None, convert=update_classpath):\n \"\"\"Compose a component list from a { class: order } dictionary.\"\"\"\n\n def _check_components(complist):\n if len({convert(c) for c in complist}) != len(complist):\n raise ValueError('Some paths in {!r} convert to the same object, '\n 'please update your settings'.format(complist))\n\n def _map_keys(compdict):\n if isinstance(compdict, BaseSettings):\n compbs = BaseSettings()\n for k, v in six.iteritems(compdict):\n prio = compdict.getpriority(k)\n if compbs.getpriority(convert(k)) == prio:\n raise ValueError('Some paths in {!r} convert to the same '\n 'object, please update your settings'\n ''.format(list(compdict.keys())))\n else:\n compbs.set(convert(k), v, priority=prio)\n return compbs\n else:\n _check_components(compdict)\n return {convert(k): v for k, v in six.iteritems(compdict)}\n\n # BEGIN Backwards compatibility for old (base, custom) call signature\n if isinstance(custom, (list, tuple)):\n 
_check_components(custom)\n return type(custom)(convert(c) for c in custom)\n\n if custom is not None:\n compdict.update(custom)\n # END Backwards compatibility\n\n compdict = without_none_values(_map_keys(compdict))\n return [k for k, v in sorted(six.iteritems(compdict), key=itemgetter(1))]\n\n\ndef arglist_to_dict(arglist):\n \"\"\"Convert a list of arguments like ['arg1=val1', 'arg2=val2', ...] to a\n dict\n \"\"\"\n return dict(x.split('=', 1) for x in arglist)\n\n\ndef closest_scrapy_cfg(path='.', prevpath=None):\n \"\"\"Return the path to the closest scrapy.cfg file by traversing the current\n directory and its parents\n \"\"\"\n if path == prevpath:\n return ''\n path = os.path.abspath(path)\n cfgfile = os.path.join(path, 'scrapy.cfg')\n if os.path.exists(cfgfile):\n return cfgfile\n return closest_scrapy_cfg(os.path.dirname(path), path)\n\n\ndef init_env(project='default', set_syspath=True):\n \"\"\"Initialize environment to use command-line tool from inside a project\n dir. This sets the Scrapy settings module and modifies the Python path to\n be able to locate the project module.\n \"\"\"\n cfg = get_config()\n if cfg.has_option('settings', project):\n os.environ['SCRAPY_SETTINGS_MODULE'] = cfg.get('settings', project)\n closest = closest_scrapy_cfg()\n if closest:\n projdir = os.path.dirname(closest)\n if set_syspath and projdir not in sys.path:\n sys.path.append(projdir)\n\n\ndef get_config(use_closest=True):\n \"\"\"Get Scrapy config file as a SafeConfigParser\"\"\"\n sources = get_sources(use_closest)\n cfg = SafeConfigParser()\n cfg.read(sources)\n return cfg\n\n\ndef get_sources(use_closest=True):\n xdg_config_home = os.environ.get('XDG_CONFIG_HOME') or \\\n os.path.expanduser('~/.config')\n sources = ['/etc/scrapy.cfg', r'c:\\scrapy\\scrapy.cfg',\n xdg_config_home + '/scrapy.cfg',\n os.path.expanduser('~/.scrapy.cfg')]\n if use_closest:\n sources.append(closest_scrapy_cfg())\n return sources\n", "path": "scrapy/utils/conf.py"}]} | 1,818 | 285 |
gh_patches_debug_860 | rasdani/github-patches | git_diff | modin-project__modin-2173 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[OmniSci] Add float32 dtype support
Looks like our calcite serializer doesn't support float32 type.
</issue>
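The `dtype_strings` table in `calcite_serializer.py` stops at `float64`/`DOUBLE`, and `serialize_literal` only accepts `float`/`np.float64` literals, which is presumably where float32 frames fall over. One plausible direction, sketched here as an assumption rather than the patch that actually landed, is to map `float32` to Calcite's `FLOAT` type and widen the literal check:
```python
# Assumed sketch -- the exact type names/precision used by the real patch may differ.
import numpy as np

DTYPE_STRINGS = {
    "int8": "TINYINT",
    "int16": "SMALLINT",
    "int32": "INTEGER",
    "int64": "BIGINT",
    "bool": "BOOLEAN",
    "float32": "FLOAT",   # assumed Calcite name for 32-bit floats
    "float64": "DOUBLE",
}


def is_float_literal(val) -> bool:
    return isinstance(val, (float, np.float32, np.float64))
```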
<code>
[start of modin/experimental/engines/omnisci_on_ray/frame/calcite_serializer.py]
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14 from .expr import (
15 BaseExpr,
16 LiteralExpr,
17 OpExpr,
18 AggregateExpr,
19 )
20 from .calcite_algebra import (
21 CalciteBaseNode,
22 CalciteInputRefExpr,
23 CalciteInputIdxExpr,
24 CalciteScanNode,
25 CalciteProjectionNode,
26 CalciteFilterNode,
27 CalciteAggregateNode,
28 CalciteCollation,
29 CalciteSortNode,
30 CalciteJoinNode,
31 CalciteUnionNode,
32 )
33 import json
34 import numpy as np
35
36
37 class CalciteSerializer:
38 dtype_strings = {
39 "int8": "TINYINT",
40 "int16": "SMALLINT",
41 "int32": "INTEGER",
42 "int64": "BIGINT",
43 "bool": "BOOLEAN",
44 "float64": "DOUBLE",
45 }
46
47 def serialize(self, plan):
48 return json.dumps({"rels": [self.serialize_item(node) for node in plan]})
49
50 def expect_one_of(self, val, *types):
51 for t in types:
52 if isinstance(val, t):
53 return
54 raise TypeError("Can not serialize {}".format(type(val).__name__))
55
56 def serialize_item(self, item):
57 if isinstance(item, CalciteBaseNode):
58 return self.serialize_node(item)
59 elif isinstance(item, BaseExpr):
60 return self.serialize_expr(item)
61 elif isinstance(item, CalciteCollation):
62 return self.serialize_obj(item)
63 elif isinstance(item, list):
64 return [self.serialize_item(v) for v in item]
65
66 self.expect_one_of(item, str, int)
67 return item
68
69 def serialize_node(self, node):
70 # We need to setup context for proper references
71 # serialization
72 if isinstance(
73 node,
74 (
75 CalciteScanNode,
76 CalciteProjectionNode,
77 CalciteFilterNode,
78 CalciteAggregateNode,
79 CalciteSortNode,
80 CalciteJoinNode,
81 CalciteUnionNode,
82 ),
83 ):
84 return self.serialize_obj(node)
85 else:
86 raise NotImplementedError(
87 "Can not serialize {}".format(type(node).__name__)
88 )
89
90 def serialize_obj(self, obj):
91 res = {}
92 for k, v in obj.__dict__.items():
93 if k[0] != "_":
94 res[k] = self.serialize_item(v)
95 return res
96
97 def serialize_typed_obj(self, obj):
98 res = self.serialize_obj(obj)
99 res["type"] = self.serialize_dtype(obj._dtype)
100 return res
101
102 def serialize_expr(self, expr):
103 if isinstance(expr, LiteralExpr):
104 return self.serialize_literal(expr)
105 elif isinstance(expr, CalciteInputRefExpr):
106 return self.serialize_obj(expr)
107 elif isinstance(expr, CalciteInputIdxExpr):
108 return self.serialize_input_idx(expr)
109 elif isinstance(expr, OpExpr):
110 return self.serialize_typed_obj(expr)
111 elif isinstance(expr, AggregateExpr):
112 return self.serialize_typed_obj(expr)
113 else:
114 raise NotImplementedError(
115 "Can not serialize {}".format(type(expr).__name__)
116 )
117
118 def serialize_literal(self, literal):
119 if literal.val is None:
120 return {
121 "literal": None,
122 "type": "BIGINT",
123 "target_type": "BIGINT",
124 "scale": 0,
125 "precision": 19,
126 "type_scale": 0,
127 "type_precision": 19,
128 }
129 if type(literal.val) is str:
130 return {
131 "literal": literal.val,
132 "type": "CHAR",
133 "target_type": "CHAR",
134 "scale": -2147483648,
135 "precision": len(literal.val),
136 "type_scale": -2147483648,
137 "type_precision": len(literal.val),
138 }
139 if type(literal.val) in (int, np.int8, np.int16, np.int32, np.int64):
140 target_type, precision = self.opts_for_int_type(type(literal.val))
141 return {
142 "literal": int(literal.val),
143 "type": "DECIMAL",
144 "target_type": target_type,
145 "scale": 0,
146 "precision": len(str(literal.val)),
147 "type_scale": 0,
148 "type_precision": precision,
149 }
150 if type(literal.val) in (float, np.float64):
151 str_val = f"{literal.val:f}"
152 precision = len(str_val) - 1
153 scale = precision - str_val.index(".")
154 return {
155 "literal": int(str_val.replace(".", "")),
156 "type": "DECIMAL",
157 "target_type": "DOUBLE",
158 "scale": scale,
159 "precision": precision,
160 "type_scale": -2147483648,
161 "type_precision": 15,
162 }
163 if type(literal.val) is bool:
164 return {
165 "literal": literal.val,
166 "type": "BOOLEAN",
167 "target_type": "BOOLEAN",
168 "scale": -2147483648,
169 "precision": 1,
170 "type_scale": -2147483648,
171 "type_precision": 1,
172 }
173 raise NotImplementedError(f"Can not serialize {type(literal.val).__name__}")
174
175 def opts_for_int_type(self, int_type):
176 if int_type is np.int8:
177 return "TINYINT", 3
178 if int_type is np.int16:
179 return "SMALLINT", 5
180 if int_type is np.int32:
181 return "INTEGER", 10
182 if int_type in (np.int64, int):
183 return "BIGINT", 19
184 raise NotImplementedError(f"Unsupported integer type {int_type.__name__}")
185
186 def serialize_dtype(self, dtype):
187 return {"type": type(self).dtype_strings[dtype.name], "nullable": True}
188
189 def serialize_input_idx(self, expr):
190 return expr.input
191
[end of modin/experimental/engines/omnisci_on_ray/frame/calcite_serializer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/modin/experimental/engines/omnisci_on_ray/frame/calcite_serializer.py b/modin/experimental/engines/omnisci_on_ray/frame/calcite_serializer.py
--- a/modin/experimental/engines/omnisci_on_ray/frame/calcite_serializer.py
+++ b/modin/experimental/engines/omnisci_on_ray/frame/calcite_serializer.py
@@ -41,6 +41,7 @@
"int32": "INTEGER",
"int64": "BIGINT",
"bool": "BOOLEAN",
+ "float32": "FLOAT",
"float64": "DOUBLE",
}
| {"golden_diff": "diff --git a/modin/experimental/engines/omnisci_on_ray/frame/calcite_serializer.py b/modin/experimental/engines/omnisci_on_ray/frame/calcite_serializer.py\n--- a/modin/experimental/engines/omnisci_on_ray/frame/calcite_serializer.py\n+++ b/modin/experimental/engines/omnisci_on_ray/frame/calcite_serializer.py\n@@ -41,6 +41,7 @@\n \"int32\": \"INTEGER\",\n \"int64\": \"BIGINT\",\n \"bool\": \"BOOLEAN\",\n+ \"float32\": \"FLOAT\",\n \"float64\": \"DOUBLE\",\n }\n", "issue": "[OmniSci] Add float32 dtype support\nLooks like our calcite serializer doesn't support float32 type.\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\nfrom .expr import (\n BaseExpr,\n LiteralExpr,\n OpExpr,\n AggregateExpr,\n)\nfrom .calcite_algebra import (\n CalciteBaseNode,\n CalciteInputRefExpr,\n CalciteInputIdxExpr,\n CalciteScanNode,\n CalciteProjectionNode,\n CalciteFilterNode,\n CalciteAggregateNode,\n CalciteCollation,\n CalciteSortNode,\n CalciteJoinNode,\n CalciteUnionNode,\n)\nimport json\nimport numpy as np\n\n\nclass CalciteSerializer:\n dtype_strings = {\n \"int8\": \"TINYINT\",\n \"int16\": \"SMALLINT\",\n \"int32\": \"INTEGER\",\n \"int64\": \"BIGINT\",\n \"bool\": \"BOOLEAN\",\n \"float64\": \"DOUBLE\",\n }\n\n def serialize(self, plan):\n return json.dumps({\"rels\": [self.serialize_item(node) for node in plan]})\n\n def expect_one_of(self, val, *types):\n for t in types:\n if isinstance(val, t):\n return\n raise TypeError(\"Can not serialize {}\".format(type(val).__name__))\n\n def serialize_item(self, item):\n if isinstance(item, CalciteBaseNode):\n return self.serialize_node(item)\n elif isinstance(item, BaseExpr):\n return self.serialize_expr(item)\n elif isinstance(item, CalciteCollation):\n return self.serialize_obj(item)\n elif isinstance(item, list):\n return [self.serialize_item(v) for v in item]\n\n self.expect_one_of(item, str, int)\n return item\n\n def serialize_node(self, node):\n # We need to setup context for proper references\n # serialization\n if isinstance(\n node,\n (\n CalciteScanNode,\n CalciteProjectionNode,\n CalciteFilterNode,\n CalciteAggregateNode,\n CalciteSortNode,\n CalciteJoinNode,\n CalciteUnionNode,\n ),\n ):\n return self.serialize_obj(node)\n else:\n raise NotImplementedError(\n \"Can not serialize {}\".format(type(node).__name__)\n )\n\n def serialize_obj(self, obj):\n res = {}\n for k, v in obj.__dict__.items():\n if k[0] != \"_\":\n res[k] = self.serialize_item(v)\n return res\n\n def serialize_typed_obj(self, obj):\n res = self.serialize_obj(obj)\n res[\"type\"] = self.serialize_dtype(obj._dtype)\n return res\n\n def serialize_expr(self, expr):\n if isinstance(expr, LiteralExpr):\n return self.serialize_literal(expr)\n elif isinstance(expr, CalciteInputRefExpr):\n return 
self.serialize_obj(expr)\n elif isinstance(expr, CalciteInputIdxExpr):\n return self.serialize_input_idx(expr)\n elif isinstance(expr, OpExpr):\n return self.serialize_typed_obj(expr)\n elif isinstance(expr, AggregateExpr):\n return self.serialize_typed_obj(expr)\n else:\n raise NotImplementedError(\n \"Can not serialize {}\".format(type(expr).__name__)\n )\n\n def serialize_literal(self, literal):\n if literal.val is None:\n return {\n \"literal\": None,\n \"type\": \"BIGINT\",\n \"target_type\": \"BIGINT\",\n \"scale\": 0,\n \"precision\": 19,\n \"type_scale\": 0,\n \"type_precision\": 19,\n }\n if type(literal.val) is str:\n return {\n \"literal\": literal.val,\n \"type\": \"CHAR\",\n \"target_type\": \"CHAR\",\n \"scale\": -2147483648,\n \"precision\": len(literal.val),\n \"type_scale\": -2147483648,\n \"type_precision\": len(literal.val),\n }\n if type(literal.val) in (int, np.int8, np.int16, np.int32, np.int64):\n target_type, precision = self.opts_for_int_type(type(literal.val))\n return {\n \"literal\": int(literal.val),\n \"type\": \"DECIMAL\",\n \"target_type\": target_type,\n \"scale\": 0,\n \"precision\": len(str(literal.val)),\n \"type_scale\": 0,\n \"type_precision\": precision,\n }\n if type(literal.val) in (float, np.float64):\n str_val = f\"{literal.val:f}\"\n precision = len(str_val) - 1\n scale = precision - str_val.index(\".\")\n return {\n \"literal\": int(str_val.replace(\".\", \"\")),\n \"type\": \"DECIMAL\",\n \"target_type\": \"DOUBLE\",\n \"scale\": scale,\n \"precision\": precision,\n \"type_scale\": -2147483648,\n \"type_precision\": 15,\n }\n if type(literal.val) is bool:\n return {\n \"literal\": literal.val,\n \"type\": \"BOOLEAN\",\n \"target_type\": \"BOOLEAN\",\n \"scale\": -2147483648,\n \"precision\": 1,\n \"type_scale\": -2147483648,\n \"type_precision\": 1,\n }\n raise NotImplementedError(f\"Can not serialize {type(literal.val).__name__}\")\n\n def opts_for_int_type(self, int_type):\n if int_type is np.int8:\n return \"TINYINT\", 3\n if int_type is np.int16:\n return \"SMALLINT\", 5\n if int_type is np.int32:\n return \"INTEGER\", 10\n if int_type in (np.int64, int):\n return \"BIGINT\", 19\n raise NotImplementedError(f\"Unsupported integer type {int_type.__name__}\")\n\n def serialize_dtype(self, dtype):\n return {\"type\": type(self).dtype_strings[dtype.name], \"nullable\": True}\n\n def serialize_input_idx(self, expr):\n return expr.input\n", "path": "modin/experimental/engines/omnisci_on_ray/frame/calcite_serializer.py"}]} | 2,509 | 147 |
gh_patches_debug_7393 | rasdani/github-patches | git_diff | ivy-llc__ivy-19179 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
column_stack
</issue>
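The issue title is the whole specification here; as a point of reference, a short illustration (plain NumPy, not the ivy frontend) of the behaviour `column_stack` is expected to mirror — 1-D inputs become columns of a 2-D result, higher-dimensional inputs are concatenated along axis 1:

```
# Reference-behaviour sketch using NumPy itself; the Jax frontend requested in this
# issue is expected to match these semantics.
import numpy as np

a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
print(np.column_stack((a, b)))
# [[1 4]
#  [2 5]
#  [3 6]]
```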
<code>
[start of ivy/functional/frontends/jax/numpy/manipulations.py]
1 # local
2 import ivy
3 from ivy.functional.frontends.jax.func_wrapper import (
4 to_ivy_arrays_and_back,
5 handle_jax_dtype,
6 )
7 from ivy.functional.frontends.jax.numpy import promote_types_of_jax_inputs
8
9
10 @to_ivy_arrays_and_back
11 def clip(a, a_min=None, a_max=None, out=None):
12 ivy.utils.assertions.check_all_or_any_fn(
13 a_min,
14 a_max,
15 fn=ivy.exists,
16 type="any",
17 limit=[1, 2],
18 message="at most one of a_min or a_max can be None",
19 )
20 a = ivy.array(a)
21 if a_min is None:
22 a, a_max = promote_types_of_jax_inputs(a, a_max)
23 return ivy.minimum(a, a_max, out=out)
24 if a_max is None:
25 a, a_min = promote_types_of_jax_inputs(a, a_min)
26 return ivy.maximum(a, a_min, out=out)
27 return ivy.clip(a, a_min, a_max, out=out)
28
29
30 @handle_jax_dtype
31 @to_ivy_arrays_and_back
32 def concatenate(arrays, axis=0, dtype=None):
33 ret = ivy.concat(arrays, axis=axis)
34 if dtype:
35 ret = ivy.array(ret, dtype=dtype)
36 return ret
37
38
39 @to_ivy_arrays_and_back
40 def repeat(a, repeats, axis=None, *, total_repeat_length=None):
41 return ivy.repeat(a, repeats, axis=axis)
42
43
44 @to_ivy_arrays_and_back
45 def reshape(a, newshape, order="C"):
46 return ivy.reshape(a, shape=newshape, order=order)
47
48
49 @to_ivy_arrays_and_back
50 def ravel(a, order="C"):
51 return ivy.reshape(a, shape=(-1,), order=order)
52
53
54 @to_ivy_arrays_and_back
55 def resize(a, new_shape):
56 a = ivy.array(a)
57 resized_a = ivy.reshape(a, new_shape)
58 return resized_a
59
60
61 @to_ivy_arrays_and_back
62 def moveaxis(a, source, destination):
63 return ivy.moveaxis(a, source, destination)
64
65
66 @to_ivy_arrays_and_back
67 def flipud(m):
68 return ivy.flipud(m, out=None)
69
70
71 @to_ivy_arrays_and_back
72 def transpose(a, axes=None):
73 if ivy.isscalar(a):
74 return ivy.array(a)
75 elif a.ndim == 1:
76 return a
77 if not axes:
78 axes = list(range(len(a.shape)))[::-1]
79 if type(axes) is int:
80 axes = [axes]
81 if (len(a.shape) == 0 and not axes) or (len(a.shape) == 1 and axes[0] == 0):
82 return a
83 return ivy.permute_dims(a, axes, out=None)
84
85
86 @to_ivy_arrays_and_back
87 def flip(m, axis=None):
88 return ivy.flip(m, axis=axis)
89
90
91 @to_ivy_arrays_and_back
92 def fliplr(m):
93 return ivy.fliplr(m)
94
95
96 @to_ivy_arrays_and_back
97 def expand_dims(a, axis):
98 return ivy.expand_dims(a, axis=axis)
99
100
101 @to_ivy_arrays_and_back
102 def stack(arrays, axis=0, out=None, dtype=None):
103 if dtype:
104 return ivy.astype(
105 ivy.stack(arrays, axis=axis, out=out), ivy.as_ivy_dtype(dtype)
106 )
107 return ivy.stack(arrays, axis=axis, out=out)
108
109
110 @to_ivy_arrays_and_back
111 def take(
112 a,
113 indices,
114 axis=None,
115 out=None,
116 mode=None,
117 unique_indices=False,
118 indices_are_sorted=False,
119 fill_value=None,
120 ):
121 return ivy.gather(a, indices, axis=axis, out=out)
122
123
124 @to_ivy_arrays_and_back
125 def broadcast_arrays(*args):
126 return ivy.broadcast_arrays(*args)
127
128
129 @to_ivy_arrays_and_back
130 def broadcast_shapes(*shapes):
131 return ivy.broadcast_shapes(*shapes)
132
133
134 @to_ivy_arrays_and_back
135 def broadcast_to(array, shape):
136 return ivy.broadcast_to(array, shape)
137
138
139 @to_ivy_arrays_and_back
140 def append(arr, values, axis=None):
141 if axis is None:
142 return ivy.concat((ivy.flatten(arr), ivy.flatten(values)), axis=0)
143 else:
144 return ivy.concat((arr, values), axis=axis)
145
146
147 @to_ivy_arrays_and_back
148 def swapaxes(a, axis1, axis2):
149 return ivy.swapaxes(a, axis1, axis2)
150
151
152 @to_ivy_arrays_and_back
153 def atleast_3d(*arys):
154 return ivy.atleast_3d(*arys)
155
156
157 @to_ivy_arrays_and_back
158 def atleast_1d(*arys):
159 return ivy.atleast_1d(*arys)
160
161
162 @to_ivy_arrays_and_back
163 def atleast_2d(*arys):
164 return ivy.atleast_2d(*arys)
165
166
167 @to_ivy_arrays_and_back
168 def tril(m, k=0):
169 return ivy.tril(m, k=k)
170
171
172 @to_ivy_arrays_and_back
173 def block(arr):
174 # TODO: reimplement block
175 raise ivy.utils.exceptions.IvyNotImplementedError()
176
177
178 @to_ivy_arrays_and_back
179 def squeeze(a, axis=None):
180 return ivy.squeeze(a, axis=axis)
181
182
183 @to_ivy_arrays_and_back
184 def rot90(m, k=1, axes=(0, 1)):
185 return ivy.rot90(m, k=k, axes=axes)
186
187
188 @to_ivy_arrays_and_back
189 def split(ary, indices_or_sections, axis=0):
190 if isinstance(indices_or_sections, (list, tuple, ivy.Array)):
191 indices_or_sections = (
192 ivy.diff(indices_or_sections, prepend=[0], append=[ary.shape[axis]])
193 .astype(ivy.int8)
194 .to_list()
195 )
196 return ivy.split(
197 ary, num_or_size_splits=indices_or_sections, axis=axis, with_remainder=False
198 )
199
200
201 @to_ivy_arrays_and_back
202 def array_split(ary, indices_or_sections, axis=0):
203 return ivy.split(
204 ary, num_or_size_splits=indices_or_sections, axis=axis, with_remainder=True
205 )
206
207
208 @to_ivy_arrays_and_back
209 def tile(A, reps):
210 return ivy.tile(A, reps)
211
212
213 @to_ivy_arrays_and_back
214 def dsplit(ary, indices_or_sections):
215 if isinstance(indices_or_sections, (list, tuple, ivy.Array)):
216 indices_or_sections = (
217 ivy.diff(indices_or_sections, prepend=[0], append=[ary.shape[2]])
218 .astype(ivy.int8)
219 .to_list()
220 )
221 return ivy.dsplit(ary, indices_or_sections)
222
223
224 @to_ivy_arrays_and_back
225 def dstack(tup, dtype=None):
226 return ivy.dstack(tup)
227
228
229 @to_ivy_arrays_and_back
230 def vsplit(ary, indices_or_sections):
231 if isinstance(indices_or_sections, (list, tuple, ivy.Array)):
232 indices_or_sections = (
233 ivy.diff(indices_or_sections, prepend=[0], append=[ary.shape[0]])
234 .astype(ivy.int8)
235 .to_list()
236 )
237 return ivy.vsplit(ary, indices_or_sections)
238
239
240 @to_ivy_arrays_and_back
241 def hsplit(ary, indices_or_sections):
242 if isinstance(indices_or_sections, (list, tuple, ivy.Array)):
243 if ary.ndim == 1:
244 indices_or_sections = (
245 ivy.diff(indices_or_sections, prepend=[0], append=[ary.shape[0]])
246 .astype(ivy.int8)
247 .to_list()
248 )
249 else:
250 indices_or_sections = (
251 ivy.diff(indices_or_sections, prepend=[0], append=[ary.shape[1]])
252 .astype(ivy.int8)
253 .to_list()
254 )
255 return ivy.hsplit(ary, indices_or_sections)
256
257
258 @to_ivy_arrays_and_back
259 def roll(a, shift, axis=None):
260 return ivy.roll(a, shift, axis=axis)
261
262
263 @to_ivy_arrays_and_back
264 def row_stack(tup):
265 if len(ivy.shape(tup[0])) == 1:
266 xs = []
267 for t in tup:
268 xs += [ivy.reshape(t, (1, ivy.shape(t)[0]))]
269 return ivy.concat(xs, axis=0)
270 return ivy.concat(tup, axis=0)
271
272
273 @to_ivy_arrays_and_back
274 def pad(array, pad_width, mode="constant", **kwargs):
275 return ivy.pad(array, pad_width, mode=mode, **kwargs)
276
277
278 def hamming(M):
279 if M <= 1:
280 return ivy.ones([M], dtype=ivy.float64)
281 n = ivy.arange(M)
282 ret = 0.54 - 0.46 * ivy.cos(2.0 * ivy.pi * n / (M - 1))
283 return ret
284
285
286 @to_ivy_arrays_and_back
287 def hanning(M):
288 if M <= 1:
289 return ivy.ones([M], dtype=ivy.float64)
290 n = ivy.arange(M)
291 ret = 0.5 * (1 - ivy.cos(2.0 * ivy.pi * n / (M - 1)))
292 return ret
293
294
295 @to_ivy_arrays_and_back
296 def kaiser(M, beta):
297 if M <= 1:
298 return ivy.ones([M], dtype=ivy.float64)
299 n = ivy.arange(M)
300 alpha = 0.5 * (M - 1)
301 ret = ivy.i0(beta * ivy.sqrt(1 - ((n - alpha) / alpha) ** 2)) / ivy.i0(beta)
302 return ret
303
304
305 @handle_jax_dtype
306 @to_ivy_arrays_and_back
307 def tri(N, M=None, k=0, dtype="float64"):
308 if M is None:
309 M = N
310 ones = ivy.ones((N, M), dtype=dtype)
311 return ivy.tril(ones, k=k)
312
313
314 @to_ivy_arrays_and_back
315 def blackman(M):
316 if M < 1:
317 return ivy.array([])
318 if M == 1:
319 return ivy.ones((1,))
320 n = ivy.arange(0, M)
321 alpha = 0.16
322 a0 = (1 - alpha) / 2
323 a1 = 1 / 2
324 a2 = alpha / 2
325 ret = (
326 a0
327 - a1 * ivy.cos(2 * ivy.pi * n / (M - 1))
328 + a2 * ivy.cos(4 * ivy.pi * n / (M - 1))
329 )
330 return ret
331
[end of ivy/functional/frontends/jax/numpy/manipulations.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/jax/numpy/manipulations.py b/ivy/functional/frontends/jax/numpy/manipulations.py
--- a/ivy/functional/frontends/jax/numpy/manipulations.py
+++ b/ivy/functional/frontends/jax/numpy/manipulations.py
@@ -270,6 +270,16 @@
return ivy.concat(tup, axis=0)
+@to_ivy_arrays_and_back
+def column_stack(tup):
+ if len(ivy.shape(tup[0])) == 1:
+ ys = []
+ for t in tup:
+ ys += [ivy.reshape(t, (ivy.shape(t)[0], 1))]
+ return ivy.concat(ys, axis=1)
+ return ivy.concat(tup, axis=1)
+
+
@to_ivy_arrays_and_back
def pad(array, pad_width, mode="constant", **kwargs):
return ivy.pad(array, pad_width, mode=mode, **kwargs)
| {"golden_diff": "diff --git a/ivy/functional/frontends/jax/numpy/manipulations.py b/ivy/functional/frontends/jax/numpy/manipulations.py\n--- a/ivy/functional/frontends/jax/numpy/manipulations.py\n+++ b/ivy/functional/frontends/jax/numpy/manipulations.py\n@@ -270,6 +270,16 @@\n return ivy.concat(tup, axis=0)\n \n \n+@to_ivy_arrays_and_back\n+def column_stack(tup):\n+ if len(ivy.shape(tup[0])) == 1:\n+ ys = []\n+ for t in tup:\n+ ys += [ivy.reshape(t, (ivy.shape(t)[0], 1))]\n+ return ivy.concat(ys, axis=1)\n+ return ivy.concat(tup, axis=1)\n+\n+\n @to_ivy_arrays_and_back\n def pad(array, pad_width, mode=\"constant\", **kwargs):\n return ivy.pad(array, pad_width, mode=mode, **kwargs)\n", "issue": "column_stack\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n handle_jax_dtype,\n)\nfrom ivy.functional.frontends.jax.numpy import promote_types_of_jax_inputs\n\n\n@to_ivy_arrays_and_back\ndef clip(a, a_min=None, a_max=None, out=None):\n ivy.utils.assertions.check_all_or_any_fn(\n a_min,\n a_max,\n fn=ivy.exists,\n type=\"any\",\n limit=[1, 2],\n message=\"at most one of a_min or a_max can be None\",\n )\n a = ivy.array(a)\n if a_min is None:\n a, a_max = promote_types_of_jax_inputs(a, a_max)\n return ivy.minimum(a, a_max, out=out)\n if a_max is None:\n a, a_min = promote_types_of_jax_inputs(a, a_min)\n return ivy.maximum(a, a_min, out=out)\n return ivy.clip(a, a_min, a_max, out=out)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef concatenate(arrays, axis=0, dtype=None):\n ret = ivy.concat(arrays, axis=axis)\n if dtype:\n ret = ivy.array(ret, dtype=dtype)\n return ret\n\n\n@to_ivy_arrays_and_back\ndef repeat(a, repeats, axis=None, *, total_repeat_length=None):\n return ivy.repeat(a, repeats, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef reshape(a, newshape, order=\"C\"):\n return ivy.reshape(a, shape=newshape, order=order)\n\n\n@to_ivy_arrays_and_back\ndef ravel(a, order=\"C\"):\n return ivy.reshape(a, shape=(-1,), order=order)\n\n\n@to_ivy_arrays_and_back\ndef resize(a, new_shape):\n a = ivy.array(a)\n resized_a = ivy.reshape(a, new_shape)\n return resized_a\n\n\n@to_ivy_arrays_and_back\ndef moveaxis(a, source, destination):\n return ivy.moveaxis(a, source, destination)\n\n\n@to_ivy_arrays_and_back\ndef flipud(m):\n return ivy.flipud(m, out=None)\n\n\n@to_ivy_arrays_and_back\ndef transpose(a, axes=None):\n if ivy.isscalar(a):\n return ivy.array(a)\n elif a.ndim == 1:\n return a\n if not axes:\n axes = list(range(len(a.shape)))[::-1]\n if type(axes) is int:\n axes = [axes]\n if (len(a.shape) == 0 and not axes) or (len(a.shape) == 1 and axes[0] == 0):\n return a\n return ivy.permute_dims(a, axes, out=None)\n\n\n@to_ivy_arrays_and_back\ndef flip(m, axis=None):\n return ivy.flip(m, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef fliplr(m):\n return ivy.fliplr(m)\n\n\n@to_ivy_arrays_and_back\ndef expand_dims(a, axis):\n return ivy.expand_dims(a, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef stack(arrays, axis=0, out=None, dtype=None):\n if dtype:\n return ivy.astype(\n ivy.stack(arrays, axis=axis, out=out), ivy.as_ivy_dtype(dtype)\n )\n return ivy.stack(arrays, axis=axis, out=out)\n\n\n@to_ivy_arrays_and_back\ndef take(\n a,\n indices,\n axis=None,\n out=None,\n mode=None,\n unique_indices=False,\n indices_are_sorted=False,\n fill_value=None,\n):\n return ivy.gather(a, indices, axis=axis, out=out)\n\n\n@to_ivy_arrays_and_back\ndef broadcast_arrays(*args):\n return 
ivy.broadcast_arrays(*args)\n\n\n@to_ivy_arrays_and_back\ndef broadcast_shapes(*shapes):\n return ivy.broadcast_shapes(*shapes)\n\n\n@to_ivy_arrays_and_back\ndef broadcast_to(array, shape):\n return ivy.broadcast_to(array, shape)\n\n\n@to_ivy_arrays_and_back\ndef append(arr, values, axis=None):\n if axis is None:\n return ivy.concat((ivy.flatten(arr), ivy.flatten(values)), axis=0)\n else:\n return ivy.concat((arr, values), axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef swapaxes(a, axis1, axis2):\n return ivy.swapaxes(a, axis1, axis2)\n\n\n@to_ivy_arrays_and_back\ndef atleast_3d(*arys):\n return ivy.atleast_3d(*arys)\n\n\n@to_ivy_arrays_and_back\ndef atleast_1d(*arys):\n return ivy.atleast_1d(*arys)\n\n\n@to_ivy_arrays_and_back\ndef atleast_2d(*arys):\n return ivy.atleast_2d(*arys)\n\n\n@to_ivy_arrays_and_back\ndef tril(m, k=0):\n return ivy.tril(m, k=k)\n\n\n@to_ivy_arrays_and_back\ndef block(arr):\n # TODO: reimplement block\n raise ivy.utils.exceptions.IvyNotImplementedError()\n\n\n@to_ivy_arrays_and_back\ndef squeeze(a, axis=None):\n return ivy.squeeze(a, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef rot90(m, k=1, axes=(0, 1)):\n return ivy.rot90(m, k=k, axes=axes)\n\n\n@to_ivy_arrays_and_back\ndef split(ary, indices_or_sections, axis=0):\n if isinstance(indices_or_sections, (list, tuple, ivy.Array)):\n indices_or_sections = (\n ivy.diff(indices_or_sections, prepend=[0], append=[ary.shape[axis]])\n .astype(ivy.int8)\n .to_list()\n )\n return ivy.split(\n ary, num_or_size_splits=indices_or_sections, axis=axis, with_remainder=False\n )\n\n\n@to_ivy_arrays_and_back\ndef array_split(ary, indices_or_sections, axis=0):\n return ivy.split(\n ary, num_or_size_splits=indices_or_sections, axis=axis, with_remainder=True\n )\n\n\n@to_ivy_arrays_and_back\ndef tile(A, reps):\n return ivy.tile(A, reps)\n\n\n@to_ivy_arrays_and_back\ndef dsplit(ary, indices_or_sections):\n if isinstance(indices_or_sections, (list, tuple, ivy.Array)):\n indices_or_sections = (\n ivy.diff(indices_or_sections, prepend=[0], append=[ary.shape[2]])\n .astype(ivy.int8)\n .to_list()\n )\n return ivy.dsplit(ary, indices_or_sections)\n\n\n@to_ivy_arrays_and_back\ndef dstack(tup, dtype=None):\n return ivy.dstack(tup)\n\n\n@to_ivy_arrays_and_back\ndef vsplit(ary, indices_or_sections):\n if isinstance(indices_or_sections, (list, tuple, ivy.Array)):\n indices_or_sections = (\n ivy.diff(indices_or_sections, prepend=[0], append=[ary.shape[0]])\n .astype(ivy.int8)\n .to_list()\n )\n return ivy.vsplit(ary, indices_or_sections)\n\n\n@to_ivy_arrays_and_back\ndef hsplit(ary, indices_or_sections):\n if isinstance(indices_or_sections, (list, tuple, ivy.Array)):\n if ary.ndim == 1:\n indices_or_sections = (\n ivy.diff(indices_or_sections, prepend=[0], append=[ary.shape[0]])\n .astype(ivy.int8)\n .to_list()\n )\n else:\n indices_or_sections = (\n ivy.diff(indices_or_sections, prepend=[0], append=[ary.shape[1]])\n .astype(ivy.int8)\n .to_list()\n )\n return ivy.hsplit(ary, indices_or_sections)\n\n\n@to_ivy_arrays_and_back\ndef roll(a, shift, axis=None):\n return ivy.roll(a, shift, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef row_stack(tup):\n if len(ivy.shape(tup[0])) == 1:\n xs = []\n for t in tup:\n xs += [ivy.reshape(t, (1, ivy.shape(t)[0]))]\n return ivy.concat(xs, axis=0)\n return ivy.concat(tup, axis=0)\n\n\n@to_ivy_arrays_and_back\ndef pad(array, pad_width, mode=\"constant\", **kwargs):\n return ivy.pad(array, pad_width, mode=mode, **kwargs)\n\n\ndef hamming(M):\n if M <= 1:\n return ivy.ones([M], dtype=ivy.float64)\n n = ivy.arange(M)\n ret 
= 0.54 - 0.46 * ivy.cos(2.0 * ivy.pi * n / (M - 1))\n return ret\n\n\n@to_ivy_arrays_and_back\ndef hanning(M):\n if M <= 1:\n return ivy.ones([M], dtype=ivy.float64)\n n = ivy.arange(M)\n ret = 0.5 * (1 - ivy.cos(2.0 * ivy.pi * n / (M - 1)))\n return ret\n\n\n@to_ivy_arrays_and_back\ndef kaiser(M, beta):\n if M <= 1:\n return ivy.ones([M], dtype=ivy.float64)\n n = ivy.arange(M)\n alpha = 0.5 * (M - 1)\n ret = ivy.i0(beta * ivy.sqrt(1 - ((n - alpha) / alpha) ** 2)) / ivy.i0(beta)\n return ret\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef tri(N, M=None, k=0, dtype=\"float64\"):\n if M is None:\n M = N\n ones = ivy.ones((N, M), dtype=dtype)\n return ivy.tril(ones, k=k)\n\n\n@to_ivy_arrays_and_back\ndef blackman(M):\n if M < 1:\n return ivy.array([])\n if M == 1:\n return ivy.ones((1,))\n n = ivy.arange(0, M)\n alpha = 0.16\n a0 = (1 - alpha) / 2\n a1 = 1 / 2\n a2 = alpha / 2\n ret = (\n a0\n - a1 * ivy.cos(2 * ivy.pi * n / (M - 1))\n + a2 * ivy.cos(4 * ivy.pi * n / (M - 1))\n )\n return ret\n", "path": "ivy/functional/frontends/jax/numpy/manipulations.py"}]} | 3,861 | 226 |
gh_patches_debug_32161 | rasdani/github-patches | git_diff | sublimelsp__LSP-937 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Keep using trigger characters, or start using selectors?
For some reason the final `commit_completion` command re-triggers the completion panel "forever". I need to `esc` to hide it.

Settings are
```
"auto_complete": true,
"auto_complete_commit_on_tab": true,
"auto_complete_with_fields": true,
```
_Originally posted by @deathaxe in https://github.com/sublimelsp/LSP/pull/866#issuecomment-603466761_
</issue>
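For readers unfamiliar with the setting being debated, a hedged sketch of the two shapes an `auto_complete_triggers` entry can take — the per-language-scope registration the plugin currently builds, versus a single selector-gated entry. The trigger characters and the `source.python` scope below are made-up examples; the selector string is the one discussed for this change.

```
# Illustrative only: the characters below are examples, not what any particular
# language server actually sends.
trigger_chars = [".", ":", ">"]

# current style: one entry per language scope
per_scope_style = [
    {"characters": "".join(trigger_chars), "selector": "source.python"},
]

# selector style: one entry, gated by a scope selector that excludes comments
# and the closing quote of strings
selector_style = [
    {"characters": "".join(trigger_chars),
     "selector": "- comment - punctuation.definition.string.end"},
]

print(per_scope_style)
print(selector_style)
```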
<code>
[start of plugin/completion.py]
1 import sublime
2 import sublime_plugin
3
4 from .core.configurations import is_supported_syntax
5 from .core.edit import parse_text_edit
6 from .core.logging import debug
7 from .core.protocol import Request, Range, InsertTextFormat
8 from .core.registry import session_for_view, client_from_session, LSPViewEventListener
9 from .core.sessions import Session
10 from .core.settings import settings, client_configs
11 from .core.typing import Any, List, Dict, Optional, Union
12 from .core.views import range_to_region
13 from .core.views import text_document_position_params
14
15
16 completion_kinds = {
17 1: (sublime.KIND_ID_MARKUP, "Ξ", "Text"),
18 2: (sublime.KIND_ID_FUNCTION, "λ", "Method"),
19 3: (sublime.KIND_ID_FUNCTION, "λ", "Function"),
20 4: (sublime.KIND_ID_FUNCTION, "c", "Constructor"),
21 5: (sublime.KIND_ID_VARIABLE, "f", "Field"),
22 6: (sublime.KIND_ID_VARIABLE, "v", "Variable"),
23 7: (sublime.KIND_ID_TYPE, "c", "Class"),
24 8: (sublime.KIND_ID_TYPE, "i", "Interface"),
25 9: (sublime.KIND_ID_NAMESPACE, "◪", "Module"),
26 10: (sublime.KIND_ID_VARIABLE, "ρ", "Property"),
27 11: (sublime.KIND_ID_VARIABLE, "u", "Unit"),
28 12: (sublime.KIND_ID_VARIABLE, "ν", "Value"),
29 13: (sublime.KIND_ID_TYPE, "ε", "Enum"),
30 14: (sublime.KIND_ID_KEYWORD, "κ", "Keyword"),
31 15: (sublime.KIND_ID_SNIPPET, "s", "Snippet"),
32 16: (sublime.KIND_ID_AMBIGUOUS, "c", "Color"),
33 17: (sublime.KIND_ID_AMBIGUOUS, "#", "File"),
34 18: (sublime.KIND_ID_AMBIGUOUS, "⇢", "Reference"),
35 19: (sublime.KIND_ID_AMBIGUOUS, "ƒ", "Folder"),
36 20: (sublime.KIND_ID_TYPE, "ε", "EnumMember"),
37 21: (sublime.KIND_ID_VARIABLE, "π", "Constant"),
38 22: (sublime.KIND_ID_TYPE, "s", "Struct"),
39 23: (sublime.KIND_ID_FUNCTION, "e", "Event"),
40 24: (sublime.KIND_ID_KEYWORD, "ο", "Operator"),
41 25: (sublime.KIND_ID_TYPE, "τ", "Type Parameter")
42 }
43
44
45 def format_completion(item: dict, change_id: Any) -> sublime.CompletionItem:
46 item_kind = item.get("kind")
47 if item_kind:
48 kind = completion_kinds.get(item_kind, sublime.KIND_AMBIGUOUS)
49 else:
50 kind = sublime.KIND_AMBIGUOUS
51
52 if item.get("deprecated", False):
53 kind = (kind[0], '⚠', "⚠ {} - Deprecated".format(kind[2]))
54
55 item["change_id"] = change_id
56
57 return sublime.CompletionItem.command_completion(
58 trigger=item["label"],
59 command="lsp_select_completion_item",
60 args=item,
61 annotation=item.get('detail') or "",
62 kind=kind
63 )
64
65
66 class LspSelectCompletionItemCommand(sublime_plugin.TextCommand):
67 """
68 This command must handle four different kinds of LSP completion items:
69
70 1) plaintext + insertText (e.g. pyls)
71 2) plaintext + textEdit (e.g. intelephense)
72 3) snippet + insertText (???)
73 4) snippet + textEdit (e.g. clangd, intelephense)
74
75 For cases (3) and (4) we are forced to use the "insert_snippet" command.
76 """
77
78 def run(self, edit: sublime.Edit, **item: Any) -> None:
79 # Is it a textEdit or an insertText?
80 text_edit = item.get('textEdit')
81 if text_edit:
82 new_text = text_edit['newText']
83 # this region was valid a few view.change_count() moments back ...
84 edit_region = range_to_region(Range.from_lsp(text_edit['range']), self.view)
85 # ... but this brings it to the present.
86 edit_region = self.view.transform_region_from(edit_region, item["change_id"])
87 selection = self.view.sel()
88 primary_cursor_position = selection[0].b
89 for region in reversed(selection):
90 # For each selection region, apply the same removal as for the "primary" region.
91 # To do that, translate, or offset, the LSP edit region into the non-"primary" regions.
92 # The concept of "primary" is our own, and there is no mention of it in the LSP spec.
93 translation = region.b - primary_cursor_position
94 self.view.erase(edit, sublime.Region(edit_region.a + translation, edit_region.b + translation))
95 else:
96 new_text = item.get('insertText') or item['label']
97
98 # Is it a plaintext or a snippet?
99 if item.get("insertTextFormat", InsertTextFormat.PlainText) == InsertTextFormat.Snippet:
100 self.view.run_command("insert_snippet", {"contents": new_text})
101 else:
102 self.view.run_command("insert", {"characters": new_text})
103
104 # import statements, etc. some servers only return these after a resolve.
105 additional_edits = item.get('additionalTextEdits')
106 if additional_edits:
107 self.apply_additional_edits(additional_edits)
108 else:
109 self.do_resolve(item)
110
111 def do_resolve(self, item: dict) -> None:
112 session = session_for_view(self.view, 'completionProvider', self.view.sel()[0].begin())
113 if not session:
114 return
115
116 client = client_from_session(session)
117 if not client:
118 return
119
120 completion_provider = session.get_capability('completionProvider')
121 has_resolve_provider = completion_provider and completion_provider.get('resolveProvider', False)
122 if has_resolve_provider:
123 client.send_request(Request.resolveCompletionItem(item), self.handle_resolve_response)
124
125 def handle_resolve_response(self, response: Optional[dict]) -> None:
126 if response:
127 additional_edits = response.get('additionalTextEdits')
128 if additional_edits:
129 self.apply_additional_edits(additional_edits)
130
131 def apply_additional_edits(self, additional_edits: List[dict]) -> None:
132 edits = list(parse_text_edit(additional_edit) for additional_edit in additional_edits)
133 debug('applying additional edits:', edits)
134 self.view.run_command("lsp_apply_document_edit", {'changes': edits})
135 sublime.status_message('Applied additional edits for completion')
136
137
138 def resolve(completion_list: sublime.CompletionList, items: List[sublime.CompletionItem], flags: int = 0) -> None:
139 # Resolve the promise on the main thread to prevent any sort of data race for _set_target (see sublime_plugin.py).
140 sublime.set_timeout(lambda: completion_list.set_completions(items, flags))
141
142
143 class CompletionHandler(LSPViewEventListener):
144 def __init__(self, view: sublime.View) -> None:
145 super().__init__(view)
146 self.initialized = False
147 self.enabled = False
148
149 @classmethod
150 def is_applicable(cls, view_settings: dict) -> bool:
151 if 'completion' in settings.disabled_capabilities:
152 return False
153
154 syntax = view_settings.get('syntax')
155 return is_supported_syntax(syntax, client_configs.all) if syntax else False
156
157 def initialize(self) -> None:
158 self.initialized = True
159 session = session_for_view(self.view, 'completionProvider')
160 if session:
161 completionProvider = session.get_capability('completionProvider') or dict() # type: dict
162 # A language server may have an empty dict as CompletionOptions. In that case,
163 # no trigger characters will be registered but we'll still respond to Sublime's
164 # usual query for completions. So the explicit check for None is necessary.
165 self.enabled = True
166
167 trigger_chars = completionProvider.get(
168 'triggerCharacters') or []
169 if trigger_chars:
170 self.register_trigger_chars(session, trigger_chars)
171 # This is to make ST match with labels that have a weird prefix like a space character.
172 self.view.settings().set("auto_complete_preserve_order", "none")
173
174 def _view_language(self, config_name: str) -> Optional[str]:
175 languages = self.view.settings().get('lsp_language')
176 return languages.get(config_name) if languages else None
177
178 def register_trigger_chars(self, session: Session, trigger_chars: List[str]) -> None:
179 completion_triggers = self.view.settings().get('auto_complete_triggers', []) or [] # type: List[Dict[str, str]]
180 view_language = self._view_language(session.config.name)
181 if view_language:
182 for language in session.config.languages:
183 if language.id == view_language:
184 for scope in language.scopes:
185 # debug("registering", trigger_chars, "for", scope)
186 scope_trigger = next(
187 (trigger for trigger in completion_triggers if trigger.get('selector', None) == scope),
188 None
189 )
190 if not scope_trigger: # do not override user's trigger settings.
191 completion_triggers.append({
192 'characters': "".join(trigger_chars),
193 'selector': scope
194 })
195
196 self.view.settings().set('auto_complete_triggers', completion_triggers)
197
198 def on_query_completions(self, prefix: str, locations: List[int]) -> Optional[sublime.CompletionList]:
199 if not self.initialized:
200 self.initialize()
201 if not self.enabled:
202 return None
203 client = client_from_session(session_for_view(self.view, 'completionProvider', locations[0]))
204 if not client:
205 return None
206 self.manager.documents.purge_changes(self.view)
207 completion_list = sublime.CompletionList()
208 client.send_request(
209 Request.complete(text_document_position_params(self.view, locations[0])),
210 lambda res: self.handle_response(res, completion_list, self.view.change_id()),
211 lambda res: self.handle_error(res, completion_list))
212 return completion_list
213
214 def handle_response(self, response: Optional[Union[dict, List]],
215 completion_list: sublime.CompletionList, change_id: Any) -> None:
216 response_items = [] # type: List[Dict]
217 incomplete = False
218 if isinstance(response, dict):
219 response_items = response["items"] or []
220 incomplete = response.get("isIncomplete", False)
221 elif isinstance(response, list):
222 response_items = response
223 response_items = sorted(response_items, key=lambda item: item.get("sortText") or item["label"])
224
225 flags = 0
226 if settings.only_show_lsp_completions:
227 flags |= sublime.INHIBIT_WORD_COMPLETIONS
228 flags |= sublime.INHIBIT_EXPLICIT_COMPLETIONS
229
230 if incomplete:
231 flags |= sublime.DYNAMIC_COMPLETIONS
232 resolve(completion_list, [format_completion(i, change_id) for i in response_items], flags)
233
234 def handle_error(self, error: dict, completion_list: sublime.CompletionList) -> None:
235 resolve(completion_list, [])
236 sublime.status_message('Completion error: ' + str(error.get('message')))
237
[end of plugin/completion.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugin/completion.py b/plugin/completion.py
--- a/plugin/completion.py
+++ b/plugin/completion.py
@@ -171,29 +171,15 @@
# This is to make ST match with labels that have a weird prefix like a space character.
self.view.settings().set("auto_complete_preserve_order", "none")
- def _view_language(self, config_name: str) -> Optional[str]:
- languages = self.view.settings().get('lsp_language')
- return languages.get(config_name) if languages else None
-
def register_trigger_chars(self, session: Session, trigger_chars: List[str]) -> None:
- completion_triggers = self.view.settings().get('auto_complete_triggers', []) or [] # type: List[Dict[str, str]]
- view_language = self._view_language(session.config.name)
- if view_language:
- for language in session.config.languages:
- if language.id == view_language:
- for scope in language.scopes:
- # debug("registering", trigger_chars, "for", scope)
- scope_trigger = next(
- (trigger for trigger in completion_triggers if trigger.get('selector', None) == scope),
- None
- )
- if not scope_trigger: # do not override user's trigger settings.
- completion_triggers.append({
- 'characters': "".join(trigger_chars),
- 'selector': scope
- })
-
- self.view.settings().set('auto_complete_triggers', completion_triggers)
+ completion_triggers = self.view.settings().get('auto_complete_triggers') or [] # type: List[Dict[str, str]]
+
+ completion_triggers.append({
+ 'characters': "".join(trigger_chars),
+ 'selector': "- comment - punctuation.definition.string.end"
+ })
+
+ self.view.settings().set('auto_complete_triggers', completion_triggers)
def on_query_completions(self, prefix: str, locations: List[int]) -> Optional[sublime.CompletionList]:
if not self.initialized:
| {"golden_diff": "diff --git a/plugin/completion.py b/plugin/completion.py\n--- a/plugin/completion.py\n+++ b/plugin/completion.py\n@@ -171,29 +171,15 @@\n # This is to make ST match with labels that have a weird prefix like a space character.\n self.view.settings().set(\"auto_complete_preserve_order\", \"none\")\n \n- def _view_language(self, config_name: str) -> Optional[str]:\n- languages = self.view.settings().get('lsp_language')\n- return languages.get(config_name) if languages else None\n-\n def register_trigger_chars(self, session: Session, trigger_chars: List[str]) -> None:\n- completion_triggers = self.view.settings().get('auto_complete_triggers', []) or [] # type: List[Dict[str, str]]\n- view_language = self._view_language(session.config.name)\n- if view_language:\n- for language in session.config.languages:\n- if language.id == view_language:\n- for scope in language.scopes:\n- # debug(\"registering\", trigger_chars, \"for\", scope)\n- scope_trigger = next(\n- (trigger for trigger in completion_triggers if trigger.get('selector', None) == scope),\n- None\n- )\n- if not scope_trigger: # do not override user's trigger settings.\n- completion_triggers.append({\n- 'characters': \"\".join(trigger_chars),\n- 'selector': scope\n- })\n-\n- self.view.settings().set('auto_complete_triggers', completion_triggers)\n+ completion_triggers = self.view.settings().get('auto_complete_triggers') or [] # type: List[Dict[str, str]]\n+\n+ completion_triggers.append({\n+ 'characters': \"\".join(trigger_chars),\n+ 'selector': \"- comment - punctuation.definition.string.end\"\n+ })\n+\n+ self.view.settings().set('auto_complete_triggers', completion_triggers)\n \n def on_query_completions(self, prefix: str, locations: List[int]) -> Optional[sublime.CompletionList]:\n if not self.initialized:\n", "issue": "Keep using trigger characters, or start using selectors?\nFor some reason the final `commit_completion` command re-triggers the completion panel \"forever\". 
I need to `esc` to hide it.\r\n\r\n\r\n\r\nSettings are\r\n\r\n```\r\n\t\"auto_complete\": true,\r\n\t\"auto_complete_commit_on_tab\": true,\r\n\t\"auto_complete_with_fields\": true,\r\n```\r\n\r\n_Originally posted by @deathaxe in https://github.com/sublimelsp/LSP/pull/866#issuecomment-603466761_\n", "before_files": [{"content": "import sublime\nimport sublime_plugin\n\nfrom .core.configurations import is_supported_syntax\nfrom .core.edit import parse_text_edit\nfrom .core.logging import debug\nfrom .core.protocol import Request, Range, InsertTextFormat\nfrom .core.registry import session_for_view, client_from_session, LSPViewEventListener\nfrom .core.sessions import Session\nfrom .core.settings import settings, client_configs\nfrom .core.typing import Any, List, Dict, Optional, Union\nfrom .core.views import range_to_region\nfrom .core.views import text_document_position_params\n\n\ncompletion_kinds = {\n 1: (sublime.KIND_ID_MARKUP, \"\u039e\", \"Text\"),\n 2: (sublime.KIND_ID_FUNCTION, \"\u03bb\", \"Method\"),\n 3: (sublime.KIND_ID_FUNCTION, \"\u03bb\", \"Function\"),\n 4: (sublime.KIND_ID_FUNCTION, \"c\", \"Constructor\"),\n 5: (sublime.KIND_ID_VARIABLE, \"f\", \"Field\"),\n 6: (sublime.KIND_ID_VARIABLE, \"v\", \"Variable\"),\n 7: (sublime.KIND_ID_TYPE, \"c\", \"Class\"),\n 8: (sublime.KIND_ID_TYPE, \"i\", \"Interface\"),\n 9: (sublime.KIND_ID_NAMESPACE, \"\u25ea\", \"Module\"),\n 10: (sublime.KIND_ID_VARIABLE, \"\u03c1\", \"Property\"),\n 11: (sublime.KIND_ID_VARIABLE, \"u\", \"Unit\"),\n 12: (sublime.KIND_ID_VARIABLE, \"\u03bd\", \"Value\"),\n 13: (sublime.KIND_ID_TYPE, \"\u03b5\", \"Enum\"),\n 14: (sublime.KIND_ID_KEYWORD, \"\u03ba\", \"Keyword\"),\n 15: (sublime.KIND_ID_SNIPPET, \"s\", \"Snippet\"),\n 16: (sublime.KIND_ID_AMBIGUOUS, \"c\", \"Color\"),\n 17: (sublime.KIND_ID_AMBIGUOUS, \"#\", \"File\"),\n 18: (sublime.KIND_ID_AMBIGUOUS, \"\u21e2\", \"Reference\"),\n 19: (sublime.KIND_ID_AMBIGUOUS, \"\u0192\", \"Folder\"),\n 20: (sublime.KIND_ID_TYPE, \"\u03b5\", \"EnumMember\"),\n 21: (sublime.KIND_ID_VARIABLE, \"\u03c0\", \"Constant\"),\n 22: (sublime.KIND_ID_TYPE, \"s\", \"Struct\"),\n 23: (sublime.KIND_ID_FUNCTION, \"e\", \"Event\"),\n 24: (sublime.KIND_ID_KEYWORD, \"\u03bf\", \"Operator\"),\n 25: (sublime.KIND_ID_TYPE, \"\u03c4\", \"Type Parameter\")\n}\n\n\ndef format_completion(item: dict, change_id: Any) -> sublime.CompletionItem:\n item_kind = item.get(\"kind\")\n if item_kind:\n kind = completion_kinds.get(item_kind, sublime.KIND_AMBIGUOUS)\n else:\n kind = sublime.KIND_AMBIGUOUS\n\n if item.get(\"deprecated\", False):\n kind = (kind[0], '\u26a0', \"\u26a0 {} - Deprecated\".format(kind[2]))\n\n item[\"change_id\"] = change_id\n\n return sublime.CompletionItem.command_completion(\n trigger=item[\"label\"],\n command=\"lsp_select_completion_item\",\n args=item,\n annotation=item.get('detail') or \"\",\n kind=kind\n )\n\n\nclass LspSelectCompletionItemCommand(sublime_plugin.TextCommand):\n \"\"\"\n This command must handle four different kinds of LSP completion items:\n\n 1) plaintext + insertText (e.g. pyls)\n 2) plaintext + textEdit (e.g. intelephense)\n 3) snippet + insertText (???)\n 4) snippet + textEdit (e.g. 
clangd, intelephense)\n\n For cases (3) and (4) we are forced to use the \"insert_snippet\" command.\n \"\"\"\n\n def run(self, edit: sublime.Edit, **item: Any) -> None:\n # Is it a textEdit or an insertText?\n text_edit = item.get('textEdit')\n if text_edit:\n new_text = text_edit['newText']\n # this region was valid a few view.change_count() moments back ...\n edit_region = range_to_region(Range.from_lsp(text_edit['range']), self.view)\n # ... but this brings it to the present.\n edit_region = self.view.transform_region_from(edit_region, item[\"change_id\"])\n selection = self.view.sel()\n primary_cursor_position = selection[0].b\n for region in reversed(selection):\n # For each selection region, apply the same removal as for the \"primary\" region.\n # To do that, translate, or offset, the LSP edit region into the non-\"primary\" regions.\n # The concept of \"primary\" is our own, and there is no mention of it in the LSP spec.\n translation = region.b - primary_cursor_position\n self.view.erase(edit, sublime.Region(edit_region.a + translation, edit_region.b + translation))\n else:\n new_text = item.get('insertText') or item['label']\n\n # Is it a plaintext or a snippet?\n if item.get(\"insertTextFormat\", InsertTextFormat.PlainText) == InsertTextFormat.Snippet:\n self.view.run_command(\"insert_snippet\", {\"contents\": new_text})\n else:\n self.view.run_command(\"insert\", {\"characters\": new_text})\n\n # import statements, etc. some servers only return these after a resolve.\n additional_edits = item.get('additionalTextEdits')\n if additional_edits:\n self.apply_additional_edits(additional_edits)\n else:\n self.do_resolve(item)\n\n def do_resolve(self, item: dict) -> None:\n session = session_for_view(self.view, 'completionProvider', self.view.sel()[0].begin())\n if not session:\n return\n\n client = client_from_session(session)\n if not client:\n return\n\n completion_provider = session.get_capability('completionProvider')\n has_resolve_provider = completion_provider and completion_provider.get('resolveProvider', False)\n if has_resolve_provider:\n client.send_request(Request.resolveCompletionItem(item), self.handle_resolve_response)\n\n def handle_resolve_response(self, response: Optional[dict]) -> None:\n if response:\n additional_edits = response.get('additionalTextEdits')\n if additional_edits:\n self.apply_additional_edits(additional_edits)\n\n def apply_additional_edits(self, additional_edits: List[dict]) -> None:\n edits = list(parse_text_edit(additional_edit) for additional_edit in additional_edits)\n debug('applying additional edits:', edits)\n self.view.run_command(\"lsp_apply_document_edit\", {'changes': edits})\n sublime.status_message('Applied additional edits for completion')\n\n\ndef resolve(completion_list: sublime.CompletionList, items: List[sublime.CompletionItem], flags: int = 0) -> None:\n # Resolve the promise on the main thread to prevent any sort of data race for _set_target (see sublime_plugin.py).\n sublime.set_timeout(lambda: completion_list.set_completions(items, flags))\n\n\nclass CompletionHandler(LSPViewEventListener):\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self.initialized = False\n self.enabled = False\n\n @classmethod\n def is_applicable(cls, view_settings: dict) -> bool:\n if 'completion' in settings.disabled_capabilities:\n return False\n\n syntax = view_settings.get('syntax')\n return is_supported_syntax(syntax, client_configs.all) if syntax else False\n\n def initialize(self) -> None:\n self.initialized = 
True\n session = session_for_view(self.view, 'completionProvider')\n if session:\n completionProvider = session.get_capability('completionProvider') or dict() # type: dict\n # A language server may have an empty dict as CompletionOptions. In that case,\n # no trigger characters will be registered but we'll still respond to Sublime's\n # usual query for completions. So the explicit check for None is necessary.\n self.enabled = True\n\n trigger_chars = completionProvider.get(\n 'triggerCharacters') or []\n if trigger_chars:\n self.register_trigger_chars(session, trigger_chars)\n # This is to make ST match with labels that have a weird prefix like a space character.\n self.view.settings().set(\"auto_complete_preserve_order\", \"none\")\n\n def _view_language(self, config_name: str) -> Optional[str]:\n languages = self.view.settings().get('lsp_language')\n return languages.get(config_name) if languages else None\n\n def register_trigger_chars(self, session: Session, trigger_chars: List[str]) -> None:\n completion_triggers = self.view.settings().get('auto_complete_triggers', []) or [] # type: List[Dict[str, str]]\n view_language = self._view_language(session.config.name)\n if view_language:\n for language in session.config.languages:\n if language.id == view_language:\n for scope in language.scopes:\n # debug(\"registering\", trigger_chars, \"for\", scope)\n scope_trigger = next(\n (trigger for trigger in completion_triggers if trigger.get('selector', None) == scope),\n None\n )\n if not scope_trigger: # do not override user's trigger settings.\n completion_triggers.append({\n 'characters': \"\".join(trigger_chars),\n 'selector': scope\n })\n\n self.view.settings().set('auto_complete_triggers', completion_triggers)\n\n def on_query_completions(self, prefix: str, locations: List[int]) -> Optional[sublime.CompletionList]:\n if not self.initialized:\n self.initialize()\n if not self.enabled:\n return None\n client = client_from_session(session_for_view(self.view, 'completionProvider', locations[0]))\n if not client:\n return None\n self.manager.documents.purge_changes(self.view)\n completion_list = sublime.CompletionList()\n client.send_request(\n Request.complete(text_document_position_params(self.view, locations[0])),\n lambda res: self.handle_response(res, completion_list, self.view.change_id()),\n lambda res: self.handle_error(res, completion_list))\n return completion_list\n\n def handle_response(self, response: Optional[Union[dict, List]],\n completion_list: sublime.CompletionList, change_id: Any) -> None:\n response_items = [] # type: List[Dict]\n incomplete = False\n if isinstance(response, dict):\n response_items = response[\"items\"] or []\n incomplete = response.get(\"isIncomplete\", False)\n elif isinstance(response, list):\n response_items = response\n response_items = sorted(response_items, key=lambda item: item.get(\"sortText\") or item[\"label\"])\n\n flags = 0\n if settings.only_show_lsp_completions:\n flags |= sublime.INHIBIT_WORD_COMPLETIONS\n flags |= sublime.INHIBIT_EXPLICIT_COMPLETIONS\n\n if incomplete:\n flags |= sublime.DYNAMIC_COMPLETIONS\n resolve(completion_list, [format_completion(i, change_id) for i in response_items], flags)\n\n def handle_error(self, error: dict, completion_list: sublime.CompletionList) -> None:\n resolve(completion_list, [])\n sublime.status_message('Completion error: ' + str(error.get('message')))\n", "path": "plugin/completion.py"}]} | 3,804 | 452 |
gh_patches_debug_20102 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-533 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pit bug: the device and type of preds and target may not be the same as those of the results returned by metric_func
## 🐛 Bug
the device and type of preds and target may not be the same as those of the results returned by metric_func
### To Reproduce
use a self-designed metric_func which accepts complex tensors but returns real tensors
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
#### Code sample
<!-- Ideally attach a minimal code sample to reproduce the described issue.
Minimal means having the shortest code but still preserving the bug. -->
### Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
### Environment
- PyTorch Version (e.g., 1.0):
- OS (e.g., Linux):
- How you installed PyTorch (`conda`, `pip`, source):
- Build command you used (if compiling from source):
- Python version:
- CUDA/cuDNN version:
- GPU models and configuration:
- Any other relevant information:
### Additional context
<!-- Add any other context about the problem here. -->
</issue>
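The "Code sample" section of the report was left empty; the following is a hypothetical minimal illustration (the metric function, shapes and dtypes are invented for this sketch, not taken from the report) of the mismatch being described — a metric function that accepts complex tensors but returns real ones, so its results no longer share the dtype of `preds`/`target` (and on GPU runs the device could differ as well):

```
# Hypothetical repro sketch: complex inputs, real-valued metric output.
import torch

def complex_mag_metric(preds: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    # .abs() of a complex tensor is real, so the returned metric is float32
    # even though preds/target are complex64
    return -(preds - target).abs().mean(dim=-1)

preds = torch.randn(2, 3, 8, dtype=torch.complex64)
target = torch.randn(2, 3, 8, dtype=torch.complex64)
metric = complex_mag_metric(preds, target)
print(preds.dtype, metric.dtype)  # torch.complex64 torch.float32
```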
<code>
[start of torchmetrics/functional/audio/pit.py]
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import warnings
15 from itertools import permutations
16 from typing import Any, Callable, Dict, Tuple, Union
17
18 import torch
19 from torch import Tensor
20
21 from torchmetrics.utilities.checks import _check_same_shape
22 from torchmetrics.utilities.imports import _SCIPY_AVAILABLE
23
24 # _ps_dict: cache of permutations
25 # it's necessary to cache it, otherwise it will consume a large amount of time
26 _ps_dict: dict = {} # _ps_dict[str(spk_num)+str(device)] = permutations
27
28
29 def _find_best_perm_by_linear_sum_assignment(
30 metric_mtx: torch.Tensor,
31 eval_func: Union[torch.min, torch.max],
32 ) -> Tuple[Tensor, Tensor]:
33 """Solves the linear sum assignment problem using scipy, and returns the best metric values and the
34 corresponding permutations.
35
36 Args:
37 metric_mtx:
38 the metric matrix, shape [batch_size, spk_num, spk_num]
39 eval_func:
40 the function to reduce the metric values of different the permutations
41
42 Returns:
43 best_metric:
44 shape [batch]
45 best_perm:
46 shape [batch, spk]
47 """
48 from scipy.optimize import linear_sum_assignment
49
50 mmtx = metric_mtx.detach().cpu()
51 best_perm = torch.tensor([linear_sum_assignment(pwm, eval_func == torch.max)[1] for pwm in mmtx])
52 best_perm = best_perm.to(metric_mtx.device)
53 best_metric = torch.gather(metric_mtx, 2, best_perm[:, :, None]).mean([-1, -2])
54 return best_metric, best_perm # shape [batch], shape [batch, spk]
55
56
57 def _find_best_perm_by_exhuastive_method(
58 metric_mtx: torch.Tensor,
59 eval_func: Union[torch.min, torch.max],
60 ) -> Tuple[Tensor, Tensor]:
61 """Solves the linear sum assignment problem using exhuastive method, i.e. exhuastively calculates the metric
62 values of all possible permutations, and returns the best metric values and the corresponding permutations.
63
64 Args:
65 metric_mtx:
66 the metric matrix, shape [batch_size, spk_num, spk_num]
67 eval_func:
68 the function to reduce the metric values of different the permutations
69
70 Returns:
71 best_metric:
72 shape [batch]
73 best_perm:
74 shape [batch, spk]
75 """
76 # create/read/cache the permutations and its indexes
77 # reading from cache would be much faster than creating in CPU then moving to GPU
78 batch_size, spk_num = metric_mtx.shape[:2]
79 key = str(spk_num) + str(metric_mtx.device)
80 if key not in _ps_dict:
81 # ps: all the permutations, shape [spk_num, perm_num]
82 # ps: In i-th permutation, the predcition corresponds to the j-th target is ps[j,i]
83 ps = torch.tensor(list(permutations(range(spk_num))), device=metric_mtx.device).T
84 _ps_dict[key] = ps
85 else:
86 ps = _ps_dict[key] # all the permutations, shape [spk_num, perm_num]
87
88 # find the metric of each permutation
89 perm_num = ps.shape[-1]
90 # shape [batch_size, spk_num, perm_num]
91 bps = ps[None, ...].expand(batch_size, spk_num, perm_num)
92 # shape [batch_size, spk_num, perm_num]
93 metric_of_ps_details = torch.gather(metric_mtx, 2, bps)
94 # shape [batch_size, perm_num]
95 metric_of_ps = metric_of_ps_details.mean(dim=1)
96
97 # find the best metric and best permutation
98 best_metric, best_indexes = eval_func(metric_of_ps, dim=1)
99 best_indexes = best_indexes.detach()
100 best_perm = ps.T[best_indexes, :]
101 return best_metric, best_perm # shape [batch], shape [batch, spk]
102
103
104 def pit(
105 preds: torch.Tensor, target: torch.Tensor, metric_func: Callable, eval_func: str = "max", **kwargs: Dict[str, Any]
106 ) -> Tuple[Tensor, Tensor]:
107 """Permutation invariant training (PIT). The PIT implements the famous Permutation Invariant Training method.
108
109 [1] in speech separation field in order to calculate audio metrics in a permutation invariant way.
110
111 Args:
112 preds:
113 shape [batch, spk, ...]
114 target:
115 shape [batch, spk, ...]
116 metric_func:
117 a metric function accept a batch of target and estimate,
118 i.e. metric_func(preds[:, i, ...], target[:, j, ...]), and returns a batch of metric tensors [batch]
119 eval_func:
120 the function to find the best permutation, can be 'min' or 'max',
121 i.e. the smaller the better or the larger the better.
122 kwargs:
123 additional args for metric_func
124
125 Returns:
126 best_metric of shape [batch],
127 best_perm of shape [batch]
128
129 Example:
130 >>> from torchmetrics.functional.audio import si_sdr
131 >>> # [batch, spk, time]
132 >>> preds = torch.tensor([[[-0.0579, 0.3560, -0.9604], [-0.1719, 0.3205, 0.2951]]])
133 >>> target = torch.tensor([[[ 1.0958, -0.1648, 0.5228], [-0.4100, 1.1942, -0.5103]]])
134 >>> best_metric, best_perm = pit(preds, target, si_sdr, 'max')
135 >>> best_metric
136 tensor([-5.1091])
137 >>> best_perm
138 tensor([[0, 1]])
139 >>> pit_permutate(preds, best_perm)
140 tensor([[[-0.0579, 0.3560, -0.9604],
141 [-0.1719, 0.3205, 0.2951]]])
142
143 Reference:
144 [1] `Permutation Invariant Training of Deep Models`_
145 """
146 _check_same_shape(preds, target)
147 if eval_func not in ["max", "min"]:
148 raise ValueError(f'eval_func can only be "max" or "min" but got {eval_func}')
149 if target.ndim < 2:
150 raise ValueError(f"Inputs must be of shape [batch, spk, ...], got {target.shape} and {preds.shape} instead")
151
152 # calculate the metric matrix
153 batch_size, spk_num = target.shape[0:2]
154 metric_mtx = torch.empty((batch_size, spk_num, spk_num), dtype=preds.dtype, device=target.device)
155 for t in range(spk_num):
156 for e in range(spk_num):
157 metric_mtx[:, t, e] = metric_func(preds[:, e, ...], target[:, t, ...], **kwargs)
158
159 # find best
160 op = torch.max if eval_func == "max" else torch.min
161 if spk_num < 3 or not _SCIPY_AVAILABLE:
162 if spk_num >= 3 and not _SCIPY_AVAILABLE:
163 warnings.warn(
164 f"In pit metric for speaker-num {spk_num}>3, we recommend installing scipy for better performance"
165 )
166
167 best_metric, best_perm = _find_best_perm_by_exhuastive_method(metric_mtx, op)
168 else:
169 best_metric, best_perm = _find_best_perm_by_linear_sum_assignment(metric_mtx, op)
170
171 return best_metric, best_perm
172
173
174 def pit_permutate(preds: Tensor, perm: Tensor) -> Tensor:
175 """permutate estimate according to perm.
176
177 Args:
178 preds (Tensor): the estimates you want to permutate, shape [batch, spk, ...]
179 perm (Tensor): the permutation returned from pit, shape [batch, spk]
180
181 Returns:
182 Tensor: the permutated version of estimate
183
184 Example:
185 >>> from torchmetrics.functional.audio import si_sdr
186 >>> # [batch, spk, time]
187 >>> preds = torch.tensor([[[-0.0579, 0.3560, -0.9604], [-0.1719, 0.3205, 0.2951]]])
188 >>> target = torch.tensor([[[ 1.0958, -0.1648, 0.5228], [-0.4100, 1.1942, -0.5103]]])
189 >>> best_metric, best_perm = pit(preds, target, si_sdr, 'max')
190 >>> best_metric
191 tensor([-5.1091])
192 >>> best_perm
193 tensor([[0, 1]])
194 >>> pit_permutate(preds, best_perm)
195 tensor([[[-0.0579, 0.3560, -0.9604],
196 [-0.1719, 0.3205, 0.2951]]])
197 """
198 preds_pmted = torch.stack([torch.index_select(pred, 0, p) for pred, p in zip(preds, perm)])
199 return preds_pmted
200
[end of torchmetrics/functional/audio/pit.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchmetrics/functional/audio/pit.py b/torchmetrics/functional/audio/pit.py
--- a/torchmetrics/functional/audio/pit.py
+++ b/torchmetrics/functional/audio/pit.py
@@ -151,10 +151,17 @@
# calculate the metric matrix
batch_size, spk_num = target.shape[0:2]
- metric_mtx = torch.empty((batch_size, spk_num, spk_num), dtype=preds.dtype, device=target.device)
- for t in range(spk_num):
- for e in range(spk_num):
- metric_mtx[:, t, e] = metric_func(preds[:, e, ...], target[:, t, ...], **kwargs)
+ metric_mtx = None
+ for target_idx in range(spk_num): # we have spk_num speeches in target in each sample
+ for preds_idx in range(spk_num): # we have spk_num speeches in preds in each sample
+ if metric_mtx is not None:
+ metric_mtx[:, target_idx, preds_idx] = metric_func(
+ preds[:, preds_idx, ...], target[:, target_idx, ...], **kwargs
+ )
+ else:
+ first_ele = metric_func(preds[:, preds_idx, ...], target[:, target_idx, ...], **kwargs)
+ metric_mtx = torch.empty((batch_size, spk_num, spk_num), dtype=first_ele.dtype, device=first_ele.device)
+ metric_mtx[:, target_idx, preds_idx] = first_ele
# find best
op = torch.max if eval_func == "max" else torch.min
| {"golden_diff": "diff --git a/torchmetrics/functional/audio/pit.py b/torchmetrics/functional/audio/pit.py\n--- a/torchmetrics/functional/audio/pit.py\n+++ b/torchmetrics/functional/audio/pit.py\n@@ -151,10 +151,17 @@\n \n # calculate the metric matrix\n batch_size, spk_num = target.shape[0:2]\n- metric_mtx = torch.empty((batch_size, spk_num, spk_num), dtype=preds.dtype, device=target.device)\n- for t in range(spk_num):\n- for e in range(spk_num):\n- metric_mtx[:, t, e] = metric_func(preds[:, e, ...], target[:, t, ...], **kwargs)\n+ metric_mtx = None\n+ for target_idx in range(spk_num): # we have spk_num speeches in target in each sample\n+ for preds_idx in range(spk_num): # we have spk_num speeches in preds in each sample\n+ if metric_mtx is not None:\n+ metric_mtx[:, target_idx, preds_idx] = metric_func(\n+ preds[:, preds_idx, ...], target[:, target_idx, ...], **kwargs\n+ )\n+ else:\n+ first_ele = metric_func(preds[:, preds_idx, ...], target[:, target_idx, ...], **kwargs)\n+ metric_mtx = torch.empty((batch_size, spk_num, spk_num), dtype=first_ele.dtype, device=first_ele.device)\n+ metric_mtx[:, target_idx, preds_idx] = first_ele\n \n # find best\n op = torch.max if eval_func == \"max\" else torch.min\n", "issue": "pit bug: the device and type of preds and target may not the same with the results returned by metric_func\n## \ud83d\udc1b Bug\r\n\r\nthe device and type of preds and target may not the same with the results returned by metric_func\r\n\r\n### To Reproduce\r\n\r\nuse a self-designed metric-func which accepts complex tensor but return real tensor\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n\r\n#### Code sample\r\n\r\n<!-- Ideally attach a minimal code sample to reproduce the decried issue.\r\nMinimal means having the shortest code but still preserving the bug. -->\r\n\r\n### Expected behavior\r\n\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n### Environment\r\n\r\n- PyTorch Version (e.g., 1.0):\r\n- OS (e.g., Linux):\r\n- How you installed PyTorch (`conda`, `pip`, source):\r\n- Build command you used (if compiling from source):\r\n- Python version:\r\n- CUDA/cuDNN version:\r\n- GPU models and configuration:\r\n- Any other relevant information:\r\n\r\n### Additional context\r\n\r\n<!-- Add any other context about the problem here. 
-->\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport warnings\nfrom itertools import permutations\nfrom typing import Any, Callable, Dict, Tuple, Union\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.utilities.checks import _check_same_shape\nfrom torchmetrics.utilities.imports import _SCIPY_AVAILABLE\n\n# _ps_dict: cache of permutations\n# it's necessary to cache it, otherwise it will consume a large amount of time\n_ps_dict: dict = {} # _ps_dict[str(spk_num)+str(device)] = permutations\n\n\ndef _find_best_perm_by_linear_sum_assignment(\n metric_mtx: torch.Tensor,\n eval_func: Union[torch.min, torch.max],\n) -> Tuple[Tensor, Tensor]:\n \"\"\"Solves the linear sum assignment problem using scipy, and returns the best metric values and the\n corresponding permutations.\n\n Args:\n metric_mtx:\n the metric matrix, shape [batch_size, spk_num, spk_num]\n eval_func:\n the function to reduce the metric values of different the permutations\n\n Returns:\n best_metric:\n shape [batch]\n best_perm:\n shape [batch, spk]\n \"\"\"\n from scipy.optimize import linear_sum_assignment\n\n mmtx = metric_mtx.detach().cpu()\n best_perm = torch.tensor([linear_sum_assignment(pwm, eval_func == torch.max)[1] for pwm in mmtx])\n best_perm = best_perm.to(metric_mtx.device)\n best_metric = torch.gather(metric_mtx, 2, best_perm[:, :, None]).mean([-1, -2])\n return best_metric, best_perm # shape [batch], shape [batch, spk]\n\n\ndef _find_best_perm_by_exhuastive_method(\n metric_mtx: torch.Tensor,\n eval_func: Union[torch.min, torch.max],\n) -> Tuple[Tensor, Tensor]:\n \"\"\"Solves the linear sum assignment problem using exhuastive method, i.e. 
exhuastively calculates the metric\n values of all possible permutations, and returns the best metric values and the corresponding permutations.\n\n Args:\n metric_mtx:\n the metric matrix, shape [batch_size, spk_num, spk_num]\n eval_func:\n the function to reduce the metric values of different the permutations\n\n Returns:\n best_metric:\n shape [batch]\n best_perm:\n shape [batch, spk]\n \"\"\"\n # create/read/cache the permutations and its indexes\n # reading from cache would be much faster than creating in CPU then moving to GPU\n batch_size, spk_num = metric_mtx.shape[:2]\n key = str(spk_num) + str(metric_mtx.device)\n if key not in _ps_dict:\n # ps: all the permutations, shape [spk_num, perm_num]\n # ps: In i-th permutation, the predcition corresponds to the j-th target is ps[j,i]\n ps = torch.tensor(list(permutations(range(spk_num))), device=metric_mtx.device).T\n _ps_dict[key] = ps\n else:\n ps = _ps_dict[key] # all the permutations, shape [spk_num, perm_num]\n\n # find the metric of each permutation\n perm_num = ps.shape[-1]\n # shape [batch_size, spk_num, perm_num]\n bps = ps[None, ...].expand(batch_size, spk_num, perm_num)\n # shape [batch_size, spk_num, perm_num]\n metric_of_ps_details = torch.gather(metric_mtx, 2, bps)\n # shape [batch_size, perm_num]\n metric_of_ps = metric_of_ps_details.mean(dim=1)\n\n # find the best metric and best permutation\n best_metric, best_indexes = eval_func(metric_of_ps, dim=1)\n best_indexes = best_indexes.detach()\n best_perm = ps.T[best_indexes, :]\n return best_metric, best_perm # shape [batch], shape [batch, spk]\n\n\ndef pit(\n preds: torch.Tensor, target: torch.Tensor, metric_func: Callable, eval_func: str = \"max\", **kwargs: Dict[str, Any]\n) -> Tuple[Tensor, Tensor]:\n \"\"\"Permutation invariant training (PIT). The PIT implements the famous Permutation Invariant Training method.\n\n [1] in speech separation field in order to calculate audio metrics in a permutation invariant way.\n\n Args:\n preds:\n shape [batch, spk, ...]\n target:\n shape [batch, spk, ...]\n metric_func:\n a metric function accept a batch of target and estimate,\n i.e. metric_func(preds[:, i, ...], target[:, j, ...]), and returns a batch of metric tensors [batch]\n eval_func:\n the function to find the best permutation, can be 'min' or 'max',\n i.e. 
the smaller the better or the larger the better.\n kwargs:\n additional args for metric_func\n\n Returns:\n best_metric of shape [batch],\n best_perm of shape [batch]\n\n Example:\n >>> from torchmetrics.functional.audio import si_sdr\n >>> # [batch, spk, time]\n >>> preds = torch.tensor([[[-0.0579, 0.3560, -0.9604], [-0.1719, 0.3205, 0.2951]]])\n >>> target = torch.tensor([[[ 1.0958, -0.1648, 0.5228], [-0.4100, 1.1942, -0.5103]]])\n >>> best_metric, best_perm = pit(preds, target, si_sdr, 'max')\n >>> best_metric\n tensor([-5.1091])\n >>> best_perm\n tensor([[0, 1]])\n >>> pit_permutate(preds, best_perm)\n tensor([[[-0.0579, 0.3560, -0.9604],\n [-0.1719, 0.3205, 0.2951]]])\n\n Reference:\n [1]\t`Permutation Invariant Training of Deep Models`_\n \"\"\"\n _check_same_shape(preds, target)\n if eval_func not in [\"max\", \"min\"]:\n raise ValueError(f'eval_func can only be \"max\" or \"min\" but got {eval_func}')\n if target.ndim < 2:\n raise ValueError(f\"Inputs must be of shape [batch, spk, ...], got {target.shape} and {preds.shape} instead\")\n\n # calculate the metric matrix\n batch_size, spk_num = target.shape[0:2]\n metric_mtx = torch.empty((batch_size, spk_num, spk_num), dtype=preds.dtype, device=target.device)\n for t in range(spk_num):\n for e in range(spk_num):\n metric_mtx[:, t, e] = metric_func(preds[:, e, ...], target[:, t, ...], **kwargs)\n\n # find best\n op = torch.max if eval_func == \"max\" else torch.min\n if spk_num < 3 or not _SCIPY_AVAILABLE:\n if spk_num >= 3 and not _SCIPY_AVAILABLE:\n warnings.warn(\n f\"In pit metric for speaker-num {spk_num}>3, we recommend installing scipy for better performance\"\n )\n\n best_metric, best_perm = _find_best_perm_by_exhuastive_method(metric_mtx, op)\n else:\n best_metric, best_perm = _find_best_perm_by_linear_sum_assignment(metric_mtx, op)\n\n return best_metric, best_perm\n\n\ndef pit_permutate(preds: Tensor, perm: Tensor) -> Tensor:\n \"\"\"permutate estimate according to perm.\n\n Args:\n preds (Tensor): the estimates you want to permutate, shape [batch, spk, ...]\n perm (Tensor): the permutation returned from pit, shape [batch, spk]\n\n Returns:\n Tensor: the permutated version of estimate\n\n Example:\n >>> from torchmetrics.functional.audio import si_sdr\n >>> # [batch, spk, time]\n >>> preds = torch.tensor([[[-0.0579, 0.3560, -0.9604], [-0.1719, 0.3205, 0.2951]]])\n >>> target = torch.tensor([[[ 1.0958, -0.1648, 0.5228], [-0.4100, 1.1942, -0.5103]]])\n >>> best_metric, best_perm = pit(preds, target, si_sdr, 'max')\n >>> best_metric\n tensor([-5.1091])\n >>> best_perm\n tensor([[0, 1]])\n >>> pit_permutate(preds, best_perm)\n tensor([[[-0.0579, 0.3560, -0.9604],\n [-0.1719, 0.3205, 0.2951]]])\n \"\"\"\n preds_pmted = torch.stack([torch.index_select(pred, 0, p) for pred, p in zip(preds, perm)])\n return preds_pmted\n", "path": "torchmetrics/functional/audio/pit.py"}]} | 3,455 | 362 |
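A condensed sketch of the allocation pattern used in the patch above: the pairwise metric matrix is created lazily from the first value returned by `metric_func`, so its dtype and device follow the metric result rather than `preds`/`target`. The function name is illustrative, and the kwargs plumbing and validation of the real implementation are omitted.

```python
import torch


def pairwise_metric_matrix(preds, target, metric_func):
    # preds/target: [batch, spk, ...]; metric_func returns a [batch] tensor
    batch_size, spk_num = target.shape[0], target.shape[1]
    metric_mtx = None
    for t in range(spk_num):
        for e in range(spk_num):
            value = metric_func(preds[:, e, ...], target[:, t, ...])
            if metric_mtx is None:
                # dtype/device come from the metric result, not from the inputs,
                # so a complex-input/real-output metric no longer breaks the fill
                metric_mtx = torch.empty(
                    (batch_size, spk_num, spk_num),
                    dtype=value.dtype, device=value.device,
                )
            metric_mtx[:, t, e] = value
    return metric_mtx
```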
gh_patches_debug_26341 | rasdani/github-patches | git_diff | freqtrade__freqtrade-1896 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
--help produces traceback
Seems I broke it somehow.
`python3 freqtrade hyperopt --help`
produces a traceback
```
Fatal exception!
Traceback (most recent call last):
File "/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/main.py", line 42, in main
args: Namespace = arguments.get_parsed_arg()
File "/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/arguments.py", line 46, in get_parsed_arg
self.parsed_arg = self.parse_args()
File "/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/arguments.py", line 54, in parse_args
parsed_arg = self.parser.parse_args(self.args)
File "/usr/lib/python3.6/argparse.py", line 1743, in parse_args
args, argv = self.parse_known_args(args, namespace)
File "/usr/lib/python3.6/argparse.py", line 1775, in parse_known_args
namespace, args = self._parse_known_args(args, namespace)
File "/usr/lib/python3.6/argparse.py", line 1963, in _parse_known_args
positionals_end_index = consume_positionals(start_index)
File "/usr/lib/python3.6/argparse.py", line 1940, in consume_positionals
take_action(action, args)
File "/usr/lib/python3.6/argparse.py", line 1849, in take_action
action(self, namespace, argument_values, option_string)
File "/usr/lib/python3.6/argparse.py", line 1146, in __call__
subnamespace, arg_strings = parser.parse_known_args(arg_strings, None)
File "/usr/lib/python3.6/argparse.py", line 1775, in parse_known_args
namespace, args = self._parse_known_args(args, namespace)
File "/usr/lib/python3.6/argparse.py", line 1981, in _parse_known_args
start_index = consume_optional(start_index)
File "/usr/lib/python3.6/argparse.py", line 1921, in consume_optional
take_action(action, args, option_string)
File "/usr/lib/python3.6/argparse.py", line 1849, in take_action
action(self, namespace, argument_values, option_string)
File "/usr/lib/python3.6/argparse.py", line 1034, in __call__
parser.exit()
File "/usr/lib/python3.6/argparse.py", line 2389, in exit
_sys.exit(status)
SystemExit: 0
```
</issue>
<code>
[start of freqtrade/main.py]
1 #!/usr/bin/env python3
2 """
3 Main Freqtrade bot script.
4 Read the documentation to know what cli arguments you need.
5 """
6
7 import sys
8 # check min. python version
9 if sys.version_info < (3, 6):
10 sys.exit("Freqtrade requires Python version >= 3.6")
11
12 # flake8: noqa E402
13 import logging
14 from argparse import Namespace
15 from typing import List
16
17 from freqtrade import OperationalException
18 from freqtrade.arguments import Arguments
19 from freqtrade.configuration import set_loggers
20 from freqtrade.worker import Worker
21
22
23 logger = logging.getLogger('freqtrade')
24
25
26 def main(sysargv: List[str] = None) -> None:
27 """
28 This function will initiate the bot and start the trading loop.
29 :return: None
30 """
31
32 try:
33 set_loggers()
34
35 worker = None
36 return_code = 1
37
38 arguments = Arguments(
39 sysargv,
40 'Free, open source crypto trading bot'
41 )
42 args: Namespace = arguments.get_parsed_arg()
43
44 # A subcommand has been issued.
45 # Means if Backtesting or Hyperopt have been called we exit the bot
46 if hasattr(args, 'func'):
47 args.func(args)
48 # TODO: fetch return_code as returned by the command function here
49 return_code = 0
50 else:
51 # Load and run worker
52 worker = Worker(args)
53 worker.run()
54
55 except KeyboardInterrupt:
56 logger.info('SIGINT received, aborting ...')
57 return_code = 0
58 except OperationalException as e:
59 logger.error(str(e))
60 return_code = 2
61 except BaseException:
62 logger.exception('Fatal exception!')
63 finally:
64 if worker:
65 worker.exit()
66 sys.exit(return_code)
67
68
69 if __name__ == '__main__':
70 main()
71
[end of freqtrade/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/freqtrade/main.py b/freqtrade/main.py
--- a/freqtrade/main.py
+++ b/freqtrade/main.py
@@ -12,7 +12,7 @@
# flake8: noqa E402
import logging
from argparse import Namespace
-from typing import List
+from typing import Any, List
from freqtrade import OperationalException
from freqtrade.arguments import Arguments
@@ -29,12 +29,11 @@
:return: None
"""
+ return_code: Any = 1
+ worker = None
try:
set_loggers()
- worker = None
- return_code = 1
-
arguments = Arguments(
sysargv,
'Free, open source crypto trading bot'
@@ -52,13 +51,15 @@
worker = Worker(args)
worker.run()
+ except SystemExit as e:
+ return_code = e
except KeyboardInterrupt:
logger.info('SIGINT received, aborting ...')
return_code = 0
except OperationalException as e:
logger.error(str(e))
return_code = 2
- except BaseException:
+ except Exception:
logger.exception('Fatal exception!')
finally:
if worker:
| {"golden_diff": "diff --git a/freqtrade/main.py b/freqtrade/main.py\n--- a/freqtrade/main.py\n+++ b/freqtrade/main.py\n@@ -12,7 +12,7 @@\n # flake8: noqa E402\n import logging\n from argparse import Namespace\n-from typing import List\n+from typing import Any, List\n \n from freqtrade import OperationalException\n from freqtrade.arguments import Arguments\n@@ -29,12 +29,11 @@\n :return: None\n \"\"\"\n \n+ return_code: Any = 1\n+ worker = None\n try:\n set_loggers()\n \n- worker = None\n- return_code = 1\n-\n arguments = Arguments(\n sysargv,\n 'Free, open source crypto trading bot'\n@@ -52,13 +51,15 @@\n worker = Worker(args)\n worker.run()\n \n+ except SystemExit as e:\n+ return_code = e\n except KeyboardInterrupt:\n logger.info('SIGINT received, aborting ...')\n return_code = 0\n except OperationalException as e:\n logger.error(str(e))\n return_code = 2\n- except BaseException:\n+ except Exception:\n logger.exception('Fatal exception!')\n finally:\n if worker:\n", "issue": "--help produces traceback\nSeems I broke it somehow.\r\n\r\n`python3 freqtrade hyperopt --help`\r\nproduces traceback \r\n```\r\nFatal exception!\r\nTraceback (most recent call last):\r\n File \"/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/main.py\", line 42, in main\r\n args: Namespace = arguments.get_parsed_arg()\r\n File \"/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/arguments.py\", line 46, in get_parsed_arg\r\n self.parsed_arg = self.parse_args()\r\n File \"/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/arguments.py\", line 54, in parse_args\r\n parsed_arg = self.parser.parse_args(self.args)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1743, in parse_args\r\n args, argv = self.parse_known_args(args, namespace)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1775, in parse_known_args\r\n namespace, args = self._parse_known_args(args, namespace)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1963, in _parse_known_args\r\n positionals_end_index = consume_positionals(start_index)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1940, in consume_positionals\r\n take_action(action, args)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1849, in take_action\r\n action(self, namespace, argument_values, option_string)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1146, in __call__\r\n subnamespace, arg_strings = parser.parse_known_args(arg_strings, None)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1775, in parse_known_args\r\n namespace, args = self._parse_known_args(args, namespace)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1981, in _parse_known_args\r\n start_index = consume_optional(start_index)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1921, in consume_optional\r\n take_action(action, args, option_string)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1849, in take_action\r\n action(self, namespace, argument_values, option_string)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1034, in __call__\r\n parser.exit()\r\n File \"/usr/lib/python3.6/argparse.py\", line 2389, in exit\r\n _sys.exit(status)\r\nSystemExit: 0\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\"\"\"\nMain Freqtrade bot script.\nRead the documentation to know what cli arguments you need.\n\"\"\"\n\nimport sys\n# check min. 
python version\nif sys.version_info < (3, 6):\n sys.exit(\"Freqtrade requires Python version >= 3.6\")\n\n# flake8: noqa E402\nimport logging\nfrom argparse import Namespace\nfrom typing import List\n\nfrom freqtrade import OperationalException\nfrom freqtrade.arguments import Arguments\nfrom freqtrade.configuration import set_loggers\nfrom freqtrade.worker import Worker\n\n\nlogger = logging.getLogger('freqtrade')\n\n\ndef main(sysargv: List[str] = None) -> None:\n \"\"\"\n This function will initiate the bot and start the trading loop.\n :return: None\n \"\"\"\n\n try:\n set_loggers()\n\n worker = None\n return_code = 1\n\n arguments = Arguments(\n sysargv,\n 'Free, open source crypto trading bot'\n )\n args: Namespace = arguments.get_parsed_arg()\n\n # A subcommand has been issued.\n # Means if Backtesting or Hyperopt have been called we exit the bot\n if hasattr(args, 'func'):\n args.func(args)\n # TODO: fetch return_code as returned by the command function here\n return_code = 0\n else:\n # Load and run worker\n worker = Worker(args)\n worker.run()\n\n except KeyboardInterrupt:\n logger.info('SIGINT received, aborting ...')\n return_code = 0\n except OperationalException as e:\n logger.error(str(e))\n return_code = 2\n except BaseException:\n logger.exception('Fatal exception!')\n finally:\n if worker:\n worker.exit()\n sys.exit(return_code)\n\n\nif __name__ == '__main__':\n main()\n", "path": "freqtrade/main.py"}]} | 1,685 | 280 |
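A minimal sketch of the control flow the patch above establishes, using only the standard library: argparse raises `SystemExit` for `--help`, and catching it explicitly (instead of letting a blanket `except BaseException` log it as fatal) lets the exit code propagate cleanly. Forwarding `e.code` is a small variant of the diff, which stores the exception object itself.

```python
import sys
from typing import Any, Callable


def run(parse_and_run: Callable[[], Any]) -> None:
    return_code: Any = 1
    try:
        parse_and_run()
        return_code = 0
    except SystemExit as e:        # raised by argparse for --help/--version
        return_code = e.code
    except KeyboardInterrupt:
        return_code = 0
    except Exception:              # narrower than BaseException, so SystemExit is not swallowed
        return_code = 1            # real code would log the traceback here
    finally:
        sys.exit(return_code)


if __name__ == "__main__":
    import argparse
    run(lambda: argparse.ArgumentParser().parse_args(["--help"]))  # exits 0, no "Fatal exception!"
```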
gh_patches_debug_31268 | rasdani/github-patches | git_diff | kornia__kornia-2131 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Weird behavior of LongestMaxSize
### Describe the bug
Hello me again,
I might be doing something wrong with the way I use kornia augmentations, please let me know if it is the case.
I was expecting `LongestMaxSize` in kornia to perform similarly to the albumentations implementation, meaning that I can throw images with different shapes at the transformation function and get images with different shapes but similar ratios, with the largest size equal to the value given to `LongestMaxSize`.
See below a small code sample that disturbs me.
### Reproduction steps
```python
import torch
import kornia.augmentation as K
a = torch.ones((512, 256))
b = torch.ones((512, 756))
print("first try")
transfo = K.LongestMaxSize(max_size=256, p=1.)
print(transfo(a).shape)
print(transfo(b).shape)
print("second try")
a = torch.ones((512, 256))
b = torch.ones((512, 756))
transfo = K.LongestMaxSize(max_size=256, p=1.)
print(transfo(b).shape)
print(transfo(a).shape)
Outputs:
first try
torch.Size([1, 1, 256, 128])
torch.Size([1, 1, 256, 128])
second try
torch.Size([1, 1, 173, 256])
torch.Size([1, 1, 173, 256])
```
### Expected behavior
I would expect to have the same values for the transformations no matter the order of the elements.
i.e. `transfo(a).shape == torch.Size([1, 1, 256, 128])` and `transfo(b).shape == torch.Size([1, 1, 173, 256])`
Am I missing something here?
### Environment
```shell
kornia='0.6.9'
torch='1.12.1+cu113'
```
### Additional context
_No response_
</issue>
<code>
[start of kornia/augmentation/random_generator/_2d/resize.py]
1 from typing import Dict, Tuple, Union
2
3 import torch
4
5 from kornia.augmentation.random_generator.base import RandomGeneratorBase
6 from kornia.augmentation.utils import _common_param_check
7 from kornia.core import Device, Tensor, tensor
8 from kornia.geometry.bbox import bbox_generator
9 from kornia.geometry.transform.affwarp import _side_to_image_size
10
11
12 class ResizeGenerator(RandomGeneratorBase):
13 r"""Get parameters for ```resize``` transformation for resize transform.
14
15 Args:
16 resize_to: Desired output size of the crop, like (h, w).
17 side: Which side to resize if `resize_to` is only of type int.
18
19 Returns:
20 parameters to be passed for transformation.
21 - src (Tensor): cropping bounding boxes with a shape of (B, 4, 2).
22 - dst (Tensor): output bounding boxes with a shape (B, 4, 2).
23 - input_size (Tensor): (h, w) from batch input.
24 - resize_to (tuple): new (h, w) for batch input.
25
26 Note:
27 The generated random numbers are not reproducible across different devices and dtypes. By default,
28 the parameters will be generated on CPU in float32. This can be changed by calling
29 ``self.set_rng_device_and_dtype(device="cuda", dtype=torch.float64)``.
30 """
31
32 def __init__(self, resize_to: Union[int, Tuple[int, int]], side: str = "short") -> None:
33 super().__init__()
34 self.output_size = resize_to
35 self.side = side
36
37 def __repr__(self) -> str:
38 repr = f"output_size={self.output_size}"
39 return repr
40
41 def make_samplers(self, device: Device, dtype: torch.dtype) -> None:
42 self.device = device
43 self.dtype = dtype
44 pass
45
46 def forward(self, batch_shape: torch.Size, same_on_batch: bool = False) -> Dict[str, Tensor]:
47 batch_size = batch_shape[0]
48 _common_param_check(batch_size, same_on_batch)
49 _device = self.device
50 _dtype = self.dtype
51
52 if batch_size == 0:
53 return dict(
54 src=torch.zeros([0, 4, 2], device=_device, dtype=_dtype),
55 dst=torch.zeros([0, 4, 2], device=_device, dtype=_dtype),
56 )
57
58 input_size = h, w = (batch_shape[-2], batch_shape[-1])
59
60 src = bbox_generator(
61 tensor(0, device=_device, dtype=_dtype),
62 tensor(0, device=_device, dtype=_dtype),
63 tensor(input_size[1], device=_device, dtype=_dtype),
64 tensor(input_size[0], device=_device, dtype=_dtype),
65 ).repeat(batch_size, 1, 1)
66
67 if isinstance(self.output_size, int):
68 aspect_ratio = w / h
69 self.output_size = _side_to_image_size(self.output_size, aspect_ratio, self.side)
70
71 if not (
72 len(self.output_size) == 2
73 and isinstance(self.output_size[0], (int,))
74 and isinstance(self.output_size[1], (int,))
75 and self.output_size[0] > 0
76 and self.output_size[1] > 0
77 ):
78 raise AssertionError(f"`resize_to` must be a tuple of 2 positive integers. Got {self.output_size}.")
79
80 dst = bbox_generator(
81 tensor(0, device=_device, dtype=_dtype),
82 tensor(0, device=_device, dtype=_dtype),
83 tensor(self.output_size[1], device=_device, dtype=_dtype),
84 tensor(self.output_size[0], device=_device, dtype=_dtype),
85 ).repeat(batch_size, 1, 1)
86
87 _input_size = tensor(input_size, device=_device, dtype=torch.long).expand(batch_size, -1)
88 _output_size = tensor(self.output_size, device=_device, dtype=torch.long).expand(batch_size, -1)
89
90 return dict(src=src, dst=dst, input_size=_input_size, output_size=_output_size)
91
[end of kornia/augmentation/random_generator/_2d/resize.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kornia/augmentation/random_generator/_2d/resize.py b/kornia/augmentation/random_generator/_2d/resize.py
--- a/kornia/augmentation/random_generator/_2d/resize.py
+++ b/kornia/augmentation/random_generator/_2d/resize.py
@@ -66,25 +66,27 @@
if isinstance(self.output_size, int):
aspect_ratio = w / h
- self.output_size = _side_to_image_size(self.output_size, aspect_ratio, self.side)
+ output_size = _side_to_image_size(self.output_size, aspect_ratio, self.side)
+ else:
+ output_size = self.output_size
if not (
- len(self.output_size) == 2
- and isinstance(self.output_size[0], (int,))
- and isinstance(self.output_size[1], (int,))
- and self.output_size[0] > 0
- and self.output_size[1] > 0
+ len(output_size) == 2
+ and isinstance(output_size[0], (int,))
+ and isinstance(output_size[1], (int,))
+ and output_size[0] > 0
+ and output_size[1] > 0
):
- raise AssertionError(f"`resize_to` must be a tuple of 2 positive integers. Got {self.output_size}.")
+ raise AssertionError(f"`resize_to` must be a tuple of 2 positive integers. Got {output_size}.")
dst = bbox_generator(
tensor(0, device=_device, dtype=_dtype),
tensor(0, device=_device, dtype=_dtype),
- tensor(self.output_size[1], device=_device, dtype=_dtype),
- tensor(self.output_size[0], device=_device, dtype=_dtype),
+ tensor(output_size[1], device=_device, dtype=_dtype),
+ tensor(output_size[0], device=_device, dtype=_dtype),
).repeat(batch_size, 1, 1)
_input_size = tensor(input_size, device=_device, dtype=torch.long).expand(batch_size, -1)
- _output_size = tensor(self.output_size, device=_device, dtype=torch.long).expand(batch_size, -1)
+ _output_size = tensor(output_size, device=_device, dtype=torch.long).expand(batch_size, -1)
return dict(src=src, dst=dst, input_size=_input_size, output_size=_output_size)
| {"golden_diff": "diff --git a/kornia/augmentation/random_generator/_2d/resize.py b/kornia/augmentation/random_generator/_2d/resize.py\n--- a/kornia/augmentation/random_generator/_2d/resize.py\n+++ b/kornia/augmentation/random_generator/_2d/resize.py\n@@ -66,25 +66,27 @@\n \n if isinstance(self.output_size, int):\n aspect_ratio = w / h\n- self.output_size = _side_to_image_size(self.output_size, aspect_ratio, self.side)\n+ output_size = _side_to_image_size(self.output_size, aspect_ratio, self.side)\n+ else:\n+ output_size = self.output_size\n \n if not (\n- len(self.output_size) == 2\n- and isinstance(self.output_size[0], (int,))\n- and isinstance(self.output_size[1], (int,))\n- and self.output_size[0] > 0\n- and self.output_size[1] > 0\n+ len(output_size) == 2\n+ and isinstance(output_size[0], (int,))\n+ and isinstance(output_size[1], (int,))\n+ and output_size[0] > 0\n+ and output_size[1] > 0\n ):\n- raise AssertionError(f\"`resize_to` must be a tuple of 2 positive integers. Got {self.output_size}.\")\n+ raise AssertionError(f\"`resize_to` must be a tuple of 2 positive integers. Got {output_size}.\")\n \n dst = bbox_generator(\n tensor(0, device=_device, dtype=_dtype),\n tensor(0, device=_device, dtype=_dtype),\n- tensor(self.output_size[1], device=_device, dtype=_dtype),\n- tensor(self.output_size[0], device=_device, dtype=_dtype),\n+ tensor(output_size[1], device=_device, dtype=_dtype),\n+ tensor(output_size[0], device=_device, dtype=_dtype),\n ).repeat(batch_size, 1, 1)\n \n _input_size = tensor(input_size, device=_device, dtype=torch.long).expand(batch_size, -1)\n- _output_size = tensor(self.output_size, device=_device, dtype=torch.long).expand(batch_size, -1)\n+ _output_size = tensor(output_size, device=_device, dtype=torch.long).expand(batch_size, -1)\n \n return dict(src=src, dst=dst, input_size=_input_size, output_size=_output_size)\n", "issue": "Weird behavior of LongestMaxSize\n### Describe the bug\r\n\r\nHello me again,\r\n\r\nI might be doing something wrong with the way I use kornia augmentations, please let me know if it is the case.\r\n\r\nI was expecting `LongestMaxSize` in kornia to perform similarily as the albumentation implementation. Meaning that I can throw any images with different shapes to the the transformation function and get an image with different shapes but similar ratios. 
The largest size being equal to the value given to `LongestMaxSize`.\r\n\r\nSee bellow a small code sample that disturbs me.\r\n\r\n### Reproduction steps\r\n\r\n```bash\r\nimport kornia.augmentation as K\r\na = torch.ones((512, 256))\r\nb = torch.ones((512, 756))\r\n\r\nprint(\"first try\")\r\ntransfo = K.LongestMaxSize(max_size=256, p=1.)\r\n\r\nprint(transfo(a).shape)\r\nprint(transfo(b).shape)\r\n\r\nprint(\"second try\")\r\n\r\na = torch.ones((512, 256))\r\nb = torch.ones((512, 756))\r\n\r\ntransfo = K.LongestMaxSize(max_size=256, p=1.)\r\nprint(transfo(b).shape)\r\nprint(transfo(a).shape)\r\n\r\nOutputs:\r\nfirst try\r\ntorch.Size([1, 1, 256, 128])\r\ntorch.Size([1, 1, 256, 128])\r\nsecond try\r\ntorch.Size([1, 1, 173, 256])\r\ntorch.Size([1, 1, 173, 256])\r\n```\r\n\r\n\r\n### Expected behavior\r\n\r\nI would expect to have the same values for the transformations no matter the order of the elements.\r\n\r\nie `transfo(a).shape == torch.Size([1, 1, 256, 128])` and `transfo(b).shape ==torch.Size([1, 1, 173, 256])`\r\n\r\nAm I missing something here ?\r\n\r\n### Environment\r\n\r\n```shell\r\nkornia='0.6.9'\r\ntorch='1.12.1+cu113'\r\n```\r\n\r\n\r\n### Additional context\r\n\r\n_No response_\n", "before_files": [{"content": "from typing import Dict, Tuple, Union\n\nimport torch\n\nfrom kornia.augmentation.random_generator.base import RandomGeneratorBase\nfrom kornia.augmentation.utils import _common_param_check\nfrom kornia.core import Device, Tensor, tensor\nfrom kornia.geometry.bbox import bbox_generator\nfrom kornia.geometry.transform.affwarp import _side_to_image_size\n\n\nclass ResizeGenerator(RandomGeneratorBase):\n r\"\"\"Get parameters for ```resize``` transformation for resize transform.\n\n Args:\n resize_to: Desired output size of the crop, like (h, w).\n side: Which side to resize if `resize_to` is only of type int.\n\n Returns:\n parameters to be passed for transformation.\n - src (Tensor): cropping bounding boxes with a shape of (B, 4, 2).\n - dst (Tensor): output bounding boxes with a shape (B, 4, 2).\n - input_size (Tensor): (h, w) from batch input.\n - resize_to (tuple): new (h, w) for batch input.\n\n Note:\n The generated random numbers are not reproducible across different devices and dtypes. By default,\n the parameters will be generated on CPU in float32. 
This can be changed by calling\n ``self.set_rng_device_and_dtype(device=\"cuda\", dtype=torch.float64)``.\n \"\"\"\n\n def __init__(self, resize_to: Union[int, Tuple[int, int]], side: str = \"short\") -> None:\n super().__init__()\n self.output_size = resize_to\n self.side = side\n\n def __repr__(self) -> str:\n repr = f\"output_size={self.output_size}\"\n return repr\n\n def make_samplers(self, device: Device, dtype: torch.dtype) -> None:\n self.device = device\n self.dtype = dtype\n pass\n\n def forward(self, batch_shape: torch.Size, same_on_batch: bool = False) -> Dict[str, Tensor]:\n batch_size = batch_shape[0]\n _common_param_check(batch_size, same_on_batch)\n _device = self.device\n _dtype = self.dtype\n\n if batch_size == 0:\n return dict(\n src=torch.zeros([0, 4, 2], device=_device, dtype=_dtype),\n dst=torch.zeros([0, 4, 2], device=_device, dtype=_dtype),\n )\n\n input_size = h, w = (batch_shape[-2], batch_shape[-1])\n\n src = bbox_generator(\n tensor(0, device=_device, dtype=_dtype),\n tensor(0, device=_device, dtype=_dtype),\n tensor(input_size[1], device=_device, dtype=_dtype),\n tensor(input_size[0], device=_device, dtype=_dtype),\n ).repeat(batch_size, 1, 1)\n\n if isinstance(self.output_size, int):\n aspect_ratio = w / h\n self.output_size = _side_to_image_size(self.output_size, aspect_ratio, self.side)\n\n if not (\n len(self.output_size) == 2\n and isinstance(self.output_size[0], (int,))\n and isinstance(self.output_size[1], (int,))\n and self.output_size[0] > 0\n and self.output_size[1] > 0\n ):\n raise AssertionError(f\"`resize_to` must be a tuple of 2 positive integers. Got {self.output_size}.\")\n\n dst = bbox_generator(\n tensor(0, device=_device, dtype=_dtype),\n tensor(0, device=_device, dtype=_dtype),\n tensor(self.output_size[1], device=_device, dtype=_dtype),\n tensor(self.output_size[0], device=_device, dtype=_dtype),\n ).repeat(batch_size, 1, 1)\n\n _input_size = tensor(input_size, device=_device, dtype=torch.long).expand(batch_size, -1)\n _output_size = tensor(self.output_size, device=_device, dtype=torch.long).expand(batch_size, -1)\n\n return dict(src=src, dst=dst, input_size=_input_size, output_size=_output_size)\n", "path": "kornia/augmentation/random_generator/_2d/resize.py"}]} | 2,109 | 543 |
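A toy sketch of the stateless pattern the patch above applies: the per-call output size is resolved into a local variable instead of overwriting `self.output_size`, so the first image's aspect ratio can no longer leak into later calls. `side_to_image_size` below is a stand-in written for this sketch (long-side semantics only), not kornia's actual helper.

```python
def side_to_image_size(side_size: int, aspect_ratio: float) -> tuple:
    """Return (h, w) with the longer side fixed to side_size (aspect_ratio = w / h)."""
    if aspect_ratio >= 1.0:                      # landscape: width is the long side
        return int(side_size / aspect_ratio), side_size
    return side_size, int(side_size * aspect_ratio)


class ResizeParams:
    def __init__(self, resize_to, side: str = "long"):
        self.output_size = resize_to             # configuration only; never mutated
        self.side = side

    def forward(self, h: int, w: int) -> tuple:
        if isinstance(self.output_size, int):
            output_size = side_to_image_size(self.output_size, w / h)   # local, per call
        else:
            output_size = self.output_size
        return output_size


gen = ResizeParams(resize_to=256)
print(gen.forward(512, 256))   # (256, 128)
print(gen.forward(512, 756))   # (173, 256), independent of call order
```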
gh_patches_debug_39944 | rasdani/github-patches | git_diff | cobbler__cobbler-2919 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Security: Stabilize the MongoDB serializer
### Describe the bug
This is the upstream bug report for SUSE/spacewalk#16737 which is a mirror issue of an internal Bugzilla issue.
Copied from the Bugzilla comment by @thesp0nge:
In the mongodb serializer class, when the config file is read, there is no sanity check.
If the file gets somewhat corrupted, it can lead to unexpected behaviour.
```python
def __connect(configfile: str = "/etc/cobbler/mongodb.conf"):
"""
Reads the config file for mongodb and then connects to the mongodb.
"""
cp = ConfigParser()
cp.read(configfile)
host = cp.get("connection", "host")
port = int(cp.get("connection", "port"))
# pylint: disable=global-statement
global mongodb
mongodb = MongoClient(host, port)['cobbler']
```
### Steps to reproduce
1. Corrupt the `mongodb.conf`
2. Start Cobbler
3. See error
### Expected behavior
We get a better exception and Cobbler is prevented from starting up.
### Cobbler version
<!--- Paste output from `cobbler version` -->
````paste below
````
### Operating system
<!--- On which operating system do you use Cobbler? -->
### Cobbler log
<!--- Paste (partial) output from `/var/log/cobbler/cobbler.log` -->
````paste below
````
### Screenshots
<!--- If applicable, add screenshots to help explain your problem. -->
### Additional information
<!--- Add any other context about the problem here. -->
</issue>
<code>
[start of cobbler/modules/serializers/mongodb.py]
1 """
2 Cobbler's Mongo database based object serializer.
3 Experimental version.
4
5 Copyright 2006-2009, Red Hat, Inc and Others
6 Michael DeHaan <michael.dehaan AT gmail>
7 James Cammarata <[email protected]>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 02110-1301 USA
23 """
24
25 from configparser import ConfigParser
26
27 from cobbler import settings
28 from cobbler.cexceptions import CX
29
30 try:
31 from pymongo import MongoClient
32 from pymongo.errors import ConnectionFailure, ConfigurationError
33 pymongo_loaded = True
34 except ModuleNotFoundError:
35 # FIXME: log message
36 pymongo_loaded = False
37
38 mongodb = None
39
40
41 def __connect(configfile: str = "/etc/cobbler/mongodb.conf"):
42 """
43 Reads the config file for mongodb and then connects to the mongodb.
44 """
45 cp = ConfigParser()
46 cp.read(configfile)
47
48 host = cp.get("connection", "host")
49 port = int(cp.get("connection", "port"))
50 # pylint: disable=global-statement
51 global mongodb
52 mongodb = MongoClient(host, port)['cobbler']
53 try:
54 # The ismaster command is cheap and doesn't require auth.
55 mongodb.admin.command('ismaster')
56 except ConnectionFailure as e:
57 # FIXME: log error
58 raise CX("Unable to connect to Mongo database or get database \"cobbler\"") from e
59 except ConfigurationError as e:
60 raise CX("The configuration of the MongoDB connection isn't correct, please check the Cobbler settings.") from e
61
62
63 def register() -> str:
64 """
65 The mandatory Cobbler module registration hook.
66 """
67 # FIXME: only run this if enabled.
68 if not pymongo_loaded:
69 return ""
70 return "serializer"
71
72
73 def what() -> str:
74 """
75 Module identification function
76 """
77 return "serializer/mongodb"
78
79
80 def serialize_item(collection, item):
81 """
82 Save a collection item to database.
83
84 :param collection: collection
85 :param item: collection item
86 """
87
88 __connect()
89 collection = mongodb[collection.collection_type()]
90 data = collection.find_one({'name': item.name})
91 if data:
92 collection.update({'name': item.name}, item.serialize())
93 else:
94 collection.insert(item.serialize())
95
96
97 def serialize_delete(collection, item):
98 """
99 Delete a collection item from database.
100
101 :param collection: collection
102 :param item: collection item
103 """
104
105 __connect()
106 collection = mongodb[collection.collection_type()]
107 collection.remove({'name': item.name})
108
109
110 def serialize(collection):
111 """
112 Save a collection to database
113
114 :param collection: collection
115 """
116
117 # TODO: error detection
118 ctype = collection.collection_type()
119 if ctype != "settings":
120 for x in collection:
121 serialize_item(collection, x)
122
123
124 def deserialize_raw(collection_type: str):
125 """
126 Get a collection from mongodb and parse it into an object.
127
128 :param collection_type: The collection type to fetch.
129 :return: The first element of the collection requested.
130 """
131 if collection_type == "settings":
132 return settings.read_settings_file()
133 else:
134 __connect()
135 collection = mongodb[collection_type]
136 return collection.find()
137
138
139 def deserialize(collection, topological: bool = True):
140 """
141 Load a collection from the database.
142
143 :param collection: The collection to deserialize.
144 :param topological: If the collection list should be sorted by the collection dict depth value or not.
145 """
146
147 datastruct = deserialize_raw(collection.collection_type())
148 if topological and type(datastruct) == list:
149 datastruct.sort(key=lambda x: x["depth"])
150 if type(datastruct) == dict:
151 collection.from_dict(datastruct)
152 elif type(datastruct) == list:
153 collection.from_list(datastruct)
154
[end of cobbler/modules/serializers/mongodb.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cobbler/modules/serializers/mongodb.py b/cobbler/modules/serializers/mongodb.py
--- a/cobbler/modules/serializers/mongodb.py
+++ b/cobbler/modules/serializers/mongodb.py
@@ -21,7 +21,8 @@
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
-
+import configparser
+import pathlib
from configparser import ConfigParser
from cobbler import settings
@@ -30,6 +31,7 @@
try:
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure, ConfigurationError
+
pymongo_loaded = True
except ModuleNotFoundError:
# FIXME: log message
@@ -42,22 +44,34 @@
"""
Reads the config file for mongodb and then connects to the mongodb.
"""
- cp = ConfigParser()
- cp.read(configfile)
+ if not pathlib.Path(configfile).is_file():
+ raise FileNotFoundError(
+ "Specified Cobbler MongoDB config file could not be found!"
+ )
- host = cp.get("connection", "host")
- port = int(cp.get("connection", "port"))
+ cp = ConfigParser()
+ try:
+ cp.read(configfile)
+ except configparser.Error as cp_error:
+ raise configparser.Error(
+ "Could not read Cobbler MongoDB config file!"
+ ) from cp_error
+
+ host = cp.get("connection", "host", fallback="localhost")
+ port = cp.getint("connection", "port", fallback=27017)
# pylint: disable=global-statement
global mongodb
- mongodb = MongoClient(host, port)['cobbler']
+ mongodb = MongoClient(host, port)["cobbler"]
try:
# The ismaster command is cheap and doesn't require auth.
- mongodb.admin.command('ismaster')
+ mongodb.admin.command("ismaster")
except ConnectionFailure as e:
# FIXME: log error
- raise CX("Unable to connect to Mongo database or get database \"cobbler\"") from e
+ raise CX('Unable to connect to Mongo database or get database "cobbler"') from e
except ConfigurationError as e:
- raise CX("The configuration of the MongoDB connection isn't correct, please check the Cobbler settings.") from e
+ raise CX(
+ "The configuration of the MongoDB connection isn't correct, please check the Cobbler settings."
+ ) from e
def register() -> str:
@@ -87,9 +101,9 @@
__connect()
collection = mongodb[collection.collection_type()]
- data = collection.find_one({'name': item.name})
+ data = collection.find_one({"name": item.name})
if data:
- collection.update({'name': item.name}, item.serialize())
+ collection.update({"name": item.name}, item.serialize())
else:
collection.insert(item.serialize())
@@ -104,7 +118,7 @@
__connect()
collection = mongodb[collection.collection_type()]
- collection.remove({'name': item.name})
+ collection.remove({"name": item.name})
def serialize(collection):
| {"golden_diff": "diff --git a/cobbler/modules/serializers/mongodb.py b/cobbler/modules/serializers/mongodb.py\n--- a/cobbler/modules/serializers/mongodb.py\n+++ b/cobbler/modules/serializers/mongodb.py\n@@ -21,7 +21,8 @@\n Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n 02110-1301 USA\n \"\"\"\n-\n+import configparser\n+import pathlib\n from configparser import ConfigParser\n \n from cobbler import settings\n@@ -30,6 +31,7 @@\n try:\n from pymongo import MongoClient\n from pymongo.errors import ConnectionFailure, ConfigurationError\n+\n pymongo_loaded = True\n except ModuleNotFoundError:\n # FIXME: log message\n@@ -42,22 +44,34 @@\n \"\"\"\n Reads the config file for mongodb and then connects to the mongodb.\n \"\"\"\n- cp = ConfigParser()\n- cp.read(configfile)\n+ if not pathlib.Path(configfile).is_file():\n+ raise FileNotFoundError(\n+ \"Specified Cobbler MongoDB config file could not be found!\"\n+ )\n \n- host = cp.get(\"connection\", \"host\")\n- port = int(cp.get(\"connection\", \"port\"))\n+ cp = ConfigParser()\n+ try:\n+ cp.read(configfile)\n+ except configparser.Error as cp_error:\n+ raise configparser.Error(\n+ \"Could not read Cobbler MongoDB config file!\"\n+ ) from cp_error\n+\n+ host = cp.get(\"connection\", \"host\", fallback=\"localhost\")\n+ port = cp.getint(\"connection\", \"port\", fallback=27017)\n # pylint: disable=global-statement\n global mongodb\n- mongodb = MongoClient(host, port)['cobbler']\n+ mongodb = MongoClient(host, port)[\"cobbler\"]\n try:\n # The ismaster command is cheap and doesn't require auth.\n- mongodb.admin.command('ismaster')\n+ mongodb.admin.command(\"ismaster\")\n except ConnectionFailure as e:\n # FIXME: log error\n- raise CX(\"Unable to connect to Mongo database or get database \\\"cobbler\\\"\") from e\n+ raise CX('Unable to connect to Mongo database or get database \"cobbler\"') from e\n except ConfigurationError as e:\n- raise CX(\"The configuration of the MongoDB connection isn't correct, please check the Cobbler settings.\") from e\n+ raise CX(\n+ \"The configuration of the MongoDB connection isn't correct, please check the Cobbler settings.\"\n+ ) from e\n \n \n def register() -> str:\n@@ -87,9 +101,9 @@\n \n __connect()\n collection = mongodb[collection.collection_type()]\n- data = collection.find_one({'name': item.name})\n+ data = collection.find_one({\"name\": item.name})\n if data:\n- collection.update({'name': item.name}, item.serialize())\n+ collection.update({\"name\": item.name}, item.serialize())\n else:\n collection.insert(item.serialize())\n \n@@ -104,7 +118,7 @@\n \n __connect()\n collection = mongodb[collection.collection_type()]\n- collection.remove({'name': item.name})\n+ collection.remove({\"name\": item.name})\n \n \n def serialize(collection):\n", "issue": "Security: Stabalize the MongoDB serializer\n### Describe the bug\r\n\r\nThis is the upstream bug report for SUSE/spacewalk#16737 which is a mirror issue of an internal Bugzilla issue.\r\n\r\nCopied from the Bugzilla comment by @thesp0nge:\r\n\r\nIn mongodb serializer class, when the config file is read, there is no sanity check.\r\nIf the file get somewhat corrupted, it can lead to unexpected behaviour.\r\n\r\n```python\r\ndef __connect(configfile: str = \"/etc/cobbler/mongodb.conf\"):\r\n \"\"\"\r\n Reads the config file for mongodb and then connects to the mongodb.\r\n \"\"\"\r\n cp = ConfigParser()\r\n cp.read(configfile)\r\n\r\n host = cp.get(\"connection\", \"host\")\r\n port = int(cp.get(\"connection\", \"port\"))\r\n # pylint: 
disable=global-statement\r\n global mongodb\r\n mongodb = MongoClient(host, port)['cobbler']\r\n```\r\n\r\n### Steps to reproduce\r\n\r\n1. Corrupt the `mongodb.conf`\r\n2. Start Cobbler\r\n3. See error\r\n\r\n### Expected behavior\r\n\r\nWe get a better exception and Cobbler is prevented from starting up.\r\n\r\n### Cobbler version\r\n\r\n<!--- Paste output from `cobbler version` -->\r\n````paste below\r\n````\r\n\r\n### Operating system\r\n\r\n<!--- On which operating system do you use Cobbler? -->\r\n\r\n### Cobbler log\r\n\r\n<!--- Paste (partial) output from `/var/log/cobbler/cobbler.log` -->\r\n````paste below\r\n````\r\n\r\n### Screenshots\r\n\r\n<!--- If applicable, add screenshots to help explain your problem. -->\r\n\r\n### Additional information\r\n\r\n<!--- Add any other context about the problem here. -->\r\n\n", "before_files": [{"content": "\"\"\"\nCobbler's Mongo database based object serializer.\nExperimental version.\n\nCopyright 2006-2009, Red Hat, Inc and Others\nMichael DeHaan <michael.dehaan AT gmail>\nJames Cammarata <[email protected]>\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n02110-1301 USA\n\"\"\"\n\nfrom configparser import ConfigParser\n\nfrom cobbler import settings\nfrom cobbler.cexceptions import CX\n\ntry:\n from pymongo import MongoClient\n from pymongo.errors import ConnectionFailure, ConfigurationError\n pymongo_loaded = True\nexcept ModuleNotFoundError:\n # FIXME: log message\n pymongo_loaded = False\n\nmongodb = None\n\n\ndef __connect(configfile: str = \"/etc/cobbler/mongodb.conf\"):\n \"\"\"\n Reads the config file for mongodb and then connects to the mongodb.\n \"\"\"\n cp = ConfigParser()\n cp.read(configfile)\n\n host = cp.get(\"connection\", \"host\")\n port = int(cp.get(\"connection\", \"port\"))\n # pylint: disable=global-statement\n global mongodb\n mongodb = MongoClient(host, port)['cobbler']\n try:\n # The ismaster command is cheap and doesn't require auth.\n mongodb.admin.command('ismaster')\n except ConnectionFailure as e:\n # FIXME: log error\n raise CX(\"Unable to connect to Mongo database or get database \\\"cobbler\\\"\") from e\n except ConfigurationError as e:\n raise CX(\"The configuration of the MongoDB connection isn't correct, please check the Cobbler settings.\") from e\n\n\ndef register() -> str:\n \"\"\"\n The mandatory Cobbler module registration hook.\n \"\"\"\n # FIXME: only run this if enabled.\n if not pymongo_loaded:\n return \"\"\n return \"serializer\"\n\n\ndef what() -> str:\n \"\"\"\n Module identification function\n \"\"\"\n return \"serializer/mongodb\"\n\n\ndef serialize_item(collection, item):\n \"\"\"\n Save a collection item to database.\n\n :param collection: collection\n :param item: collection item\n \"\"\"\n\n __connect()\n collection = mongodb[collection.collection_type()]\n data = collection.find_one({'name': item.name})\n if data:\n collection.update({'name': item.name}, 
item.serialize())\n else:\n collection.insert(item.serialize())\n\n\ndef serialize_delete(collection, item):\n \"\"\"\n Delete a collection item from database.\n\n :param collection: collection\n :param item: collection item\n \"\"\"\n\n __connect()\n collection = mongodb[collection.collection_type()]\n collection.remove({'name': item.name})\n\n\ndef serialize(collection):\n \"\"\"\n Save a collection to database\n\n :param collection: collection\n \"\"\"\n\n # TODO: error detection\n ctype = collection.collection_type()\n if ctype != \"settings\":\n for x in collection:\n serialize_item(collection, x)\n\n\ndef deserialize_raw(collection_type: str):\n \"\"\"\n Get a collection from mongodb and parse it into an object.\n\n :param collection_type: The collection type to fetch.\n :return: The first element of the collection requested.\n \"\"\"\n if collection_type == \"settings\":\n return settings.read_settings_file()\n else:\n __connect()\n collection = mongodb[collection_type]\n return collection.find()\n\n\ndef deserialize(collection, topological: bool = True):\n \"\"\"\n Load a collection from the database.\n\n :param collection: The collection to deserialize.\n :param topological: If the collection list should be sorted by the collection dict depth value or not.\n \"\"\"\n\n datastruct = deserialize_raw(collection.collection_type())\n if topological and type(datastruct) == list:\n datastruct.sort(key=lambda x: x[\"depth\"])\n if type(datastruct) == dict:\n collection.from_dict(datastruct)\n elif type(datastruct) == list:\n collection.from_list(datastruct)\n", "path": "cobbler/modules/serializers/mongodb.py"}]} | 2,212 | 715 |
gh_patches_debug_42727 | rasdani/github-patches | git_diff | getmoto__moto-2330 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add trusted hosts / networks to Server mode
Currently the (Java) AWS API can only work with Moto from the local network (if the host starts with "192.168.X.X") or if the request is coming from the same host as the server. That's very limiting, because I want to run Moto in a container on an external network.
Any chance to add trusted networks or hosts via a config file or CLI args?
I'd appreciate any info on whether this has already been addressed, or whether I'm simply using it wrong.
</issue>
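For context, the limitation described in this issue comes from the host check in `DomainDispatcherApplication.get_application` (full listing below): only a small hard-coded set of Host values triggers header-based service inference, and any other host must literally match a backend URL base or `get_backend_for_host()` raises `RuntimeError('Invalid host: ...')`. A simplified, illustrative sketch of that check (not the verbatim implementation):

```python
# Simplified from moto/server.py below: only these Host header values let the
# dispatcher infer the service/region from request headers. Anything else must
# match a registered backend URL base, which is why requests coming from a
# container on an external network are rejected.
def can_infer_service(host: str) -> bool:
    return host in {"localhost", "motoserver"} or host.startswith("192.168.")
```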
<code>
[start of moto/server.py]
1 from __future__ import unicode_literals
2
3 import argparse
4 import json
5 import re
6 import sys
7 from threading import Lock
8
9 import six
10 from flask import Flask
11 from flask.testing import FlaskClient
12
13 from six.moves.urllib.parse import urlencode
14 from werkzeug.routing import BaseConverter
15 from werkzeug.serving import run_simple
16
17 from moto.backends import BACKENDS
18 from moto.core.utils import convert_flask_to_httpretty_response
19
20
21 HTTP_METHODS = ["GET", "POST", "PUT", "DELETE", "HEAD", "PATCH"]
22
23
24 DEFAULT_SERVICE_REGION = ('s3', 'us-east-1')
25
26 # Map of unsigned calls to service-region as per AWS API docs
27 # https://docs.aws.amazon.com/cognito/latest/developerguide/resource-permissions.html#amazon-cognito-signed-versus-unsigned-apis
28 UNSIGNED_REQUESTS = {
29 'AWSCognitoIdentityService': ('cognito-identity', 'us-east-1'),
30 'AWSCognitoIdentityProviderService': ('cognito-idp', 'us-east-1'),
31 }
32
33
34 class DomainDispatcherApplication(object):
35 """
36 Dispatch requests to different applications based on the "Host:" header
37 value. We'll match the host header value with the url_bases of each backend.
38 """
39
40 def __init__(self, create_app, service=None):
41 self.create_app = create_app
42 self.lock = Lock()
43 self.app_instances = {}
44 self.service = service
45
46 def get_backend_for_host(self, host):
47 if host == 'moto_api':
48 return host
49
50 if self.service:
51 return self.service
52
53 if host in BACKENDS:
54 return host
55
56 for backend_name, backend in BACKENDS.items():
57 for url_base in list(backend.values())[0].url_bases:
58 if re.match(url_base, 'http://%s' % host):
59 return backend_name
60
61 raise RuntimeError('Invalid host: "%s"' % host)
62
63 def infer_service_region(self, environ):
64 auth = environ.get('HTTP_AUTHORIZATION')
65 if auth:
66 # Signed request
67 # Parse auth header to find service assuming a SigV4 request
68 # https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html
69 # ['Credential=sdffdsa', '20170220', 'us-east-1', 'sns', 'aws4_request']
70 try:
71 credential_scope = auth.split(",")[0].split()[1]
72 _, _, region, service, _ = credential_scope.split("/")
73 return service, region
74 except ValueError:
75 # Signature format does not match, this is exceptional and we can't
76 # infer a service-region. A reduced set of services still use
77 # the deprecated SigV2, ergo prefer S3 as most likely default.
78 # https://docs.aws.amazon.com/general/latest/gr/signature-version-2.html
79 return DEFAULT_SERVICE_REGION
80 else:
81 # Unsigned request
82 target = environ.get('HTTP_X_AMZ_TARGET')
83 if target:
84 service, _ = target.split('.', 1)
85 return UNSIGNED_REQUESTS.get(service, DEFAULT_SERVICE_REGION)
86 # S3 is the last resort when the target is also unknown
87 return DEFAULT_SERVICE_REGION
88
89 def get_application(self, environ):
90 path_info = environ.get('PATH_INFO', '')
91
92 # The URL path might contain non-ASCII text, for instance unicode S3 bucket names
93 if six.PY2 and isinstance(path_info, str):
94 path_info = six.u(path_info)
95 if six.PY3 and isinstance(path_info, six.binary_type):
96 path_info = path_info.decode('utf-8')
97
98 if path_info.startswith("/moto-api") or path_info == "/favicon.ico":
99 host = "moto_api"
100 elif path_info.startswith("/latest/meta-data/"):
101 host = "instance_metadata"
102 else:
103 host = environ['HTTP_HOST'].split(':')[0]
104 if host in {'localhost', 'motoserver'} or host.startswith("192.168."):
105 service, region = self.infer_service_region(environ)
106 if service == 'dynamodb':
107 if environ['HTTP_X_AMZ_TARGET'].startswith('DynamoDBStreams'):
108 host = 'dynamodbstreams'
109 else:
110 dynamo_api_version = environ['HTTP_X_AMZ_TARGET'].split("_")[1].split(".")[0]
111 # If Newer API version, use dynamodb2
112 if dynamo_api_version > "20111205":
113 host = "dynamodb2"
114 else:
115 host = "{service}.{region}.amazonaws.com".format(
116 service=service, region=region)
117
118 with self.lock:
119 backend = self.get_backend_for_host(host)
120 app = self.app_instances.get(backend, None)
121 if app is None:
122 app = self.create_app(backend)
123 self.app_instances[backend] = app
124 return app
125
126 def __call__(self, environ, start_response):
127 backend_app = self.get_application(environ)
128 return backend_app(environ, start_response)
129
130
131 class RegexConverter(BaseConverter):
132 # http://werkzeug.pocoo.org/docs/routing/#custom-converters
133
134 def __init__(self, url_map, *items):
135 super(RegexConverter, self).__init__(url_map)
136 self.regex = items[0]
137
138
139 class AWSTestHelper(FlaskClient):
140
141 def action_data(self, action_name, **kwargs):
142 """
143 Method calls resource with action_name and returns data of response.
144 """
145 opts = {"Action": action_name}
146 opts.update(kwargs)
147 res = self.get("/?{0}".format(urlencode(opts)),
148 headers={"Host": "{0}.us-east-1.amazonaws.com".format(self.application.service)})
149 return res.data.decode("utf-8")
150
151 def action_json(self, action_name, **kwargs):
152 """
153 Method calls resource with action_name and returns object obtained via
154 deserialization of output.
155 """
156 return json.loads(self.action_data(action_name, **kwargs))
157
158
159 def create_backend_app(service):
160 from werkzeug.routing import Map
161
162 # Create the backend_app
163 backend_app = Flask(__name__)
164 backend_app.debug = True
165 backend_app.service = service
166
167 # Reset view functions to reset the app
168 backend_app.view_functions = {}
169 backend_app.url_map = Map()
170 backend_app.url_map.converters['regex'] = RegexConverter
171 backend = list(BACKENDS[service].values())[0]
172 for url_path, handler in backend.flask_paths.items():
173 if handler.__name__ == 'dispatch':
174 endpoint = '{0}.dispatch'.format(handler.__self__.__name__)
175 else:
176 endpoint = None
177
178 original_endpoint = endpoint
179 index = 2
180 while endpoint in backend_app.view_functions:
181 # HACK: Sometimes we map the same view to multiple url_paths. Flask
182 # requries us to have different names.
183 endpoint = original_endpoint + str(index)
184 index += 1
185
186 backend_app.add_url_rule(
187 url_path,
188 endpoint=endpoint,
189 methods=HTTP_METHODS,
190 view_func=convert_flask_to_httpretty_response(handler),
191 strict_slashes=False,
192 )
193
194 backend_app.test_client_class = AWSTestHelper
195 return backend_app
196
197
198 def main(argv=sys.argv[1:]):
199 parser = argparse.ArgumentParser()
200
201 # Keep this for backwards compat
202 parser.add_argument(
203 "service",
204 type=str,
205 nargs='?', # http://stackoverflow.com/a/4480202/731592
206 default=None)
207 parser.add_argument(
208 '-H', '--host', type=str,
209 help='Which host to bind',
210 default='127.0.0.1')
211 parser.add_argument(
212 '-p', '--port', type=int,
213 help='Port number to use for connection',
214 default=5000)
215 parser.add_argument(
216 '-r', '--reload',
217 action='store_true',
218 help='Reload server on a file change',
219 default=False
220 )
221 parser.add_argument(
222 '-s', '--ssl',
223 action='store_true',
224 help='Enable SSL encrypted connection with auto-generated certificate (use https://... URL)',
225 default=False
226 )
227 parser.add_argument(
228 '-c', '--ssl-cert', type=str,
229 help='Path to SSL certificate',
230 default=None)
231 parser.add_argument(
232 '-k', '--ssl-key', type=str,
233 help='Path to SSL private key',
234 default=None)
235
236 args = parser.parse_args(argv)
237
238 # Wrap the main application
239 main_app = DomainDispatcherApplication(
240 create_backend_app, service=args.service)
241 main_app.debug = True
242
243 ssl_context = None
244 if args.ssl_key and args.ssl_cert:
245 ssl_context = (args.ssl_cert, args.ssl_key)
246 elif args.ssl:
247 ssl_context = 'adhoc'
248
249 run_simple(args.host, args.port, main_app,
250 threaded=True, use_reloader=args.reload,
251 ssl_context=ssl_context)
252
253
254 if __name__ == '__main__':
255 main()
256
[end of moto/server.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/moto/server.py b/moto/server.py
--- a/moto/server.py
+++ b/moto/server.py
@@ -58,9 +58,7 @@
if re.match(url_base, 'http://%s' % host):
return backend_name
- raise RuntimeError('Invalid host: "%s"' % host)
-
- def infer_service_region(self, environ):
+ def infer_service_region_host(self, environ):
auth = environ.get('HTTP_AUTHORIZATION')
if auth:
# Signed request
@@ -70,21 +68,35 @@
try:
credential_scope = auth.split(",")[0].split()[1]
_, _, region, service, _ = credential_scope.split("/")
- return service, region
except ValueError:
# Signature format does not match, this is exceptional and we can't
# infer a service-region. A reduced set of services still use
# the deprecated SigV2, ergo prefer S3 as most likely default.
# https://docs.aws.amazon.com/general/latest/gr/signature-version-2.html
- return DEFAULT_SERVICE_REGION
+ service, region = DEFAULT_SERVICE_REGION
else:
# Unsigned request
target = environ.get('HTTP_X_AMZ_TARGET')
if target:
service, _ = target.split('.', 1)
- return UNSIGNED_REQUESTS.get(service, DEFAULT_SERVICE_REGION)
- # S3 is the last resort when the target is also unknown
- return DEFAULT_SERVICE_REGION
+ service, region = UNSIGNED_REQUESTS.get(service, DEFAULT_SERVICE_REGION)
+ else:
+ # S3 is the last resort when the target is also unknown
+ service, region = DEFAULT_SERVICE_REGION
+
+ if service == 'dynamodb':
+ if environ['HTTP_X_AMZ_TARGET'].startswith('DynamoDBStreams'):
+ host = 'dynamodbstreams'
+ else:
+ dynamo_api_version = environ['HTTP_X_AMZ_TARGET'].split("_")[1].split(".")[0]
+ # If Newer API version, use dynamodb2
+ if dynamo_api_version > "20111205":
+ host = "dynamodb2"
+ else:
+ host = "{service}.{region}.amazonaws.com".format(
+ service=service, region=region)
+
+ return host
def get_application(self, environ):
path_info = environ.get('PATH_INFO', '')
@@ -101,22 +113,14 @@
host = "instance_metadata"
else:
host = environ['HTTP_HOST'].split(':')[0]
- if host in {'localhost', 'motoserver'} or host.startswith("192.168."):
- service, region = self.infer_service_region(environ)
- if service == 'dynamodb':
- if environ['HTTP_X_AMZ_TARGET'].startswith('DynamoDBStreams'):
- host = 'dynamodbstreams'
- else:
- dynamo_api_version = environ['HTTP_X_AMZ_TARGET'].split("_")[1].split(".")[0]
- # If Newer API version, use dynamodb2
- if dynamo_api_version > "20111205":
- host = "dynamodb2"
- else:
- host = "{service}.{region}.amazonaws.com".format(
- service=service, region=region)
with self.lock:
backend = self.get_backend_for_host(host)
+ if not backend:
+ # No regular backend found; try parsing other headers
+ host = self.infer_service_region_host(environ)
+ backend = self.get_backend_for_host(host)
+
app = self.app_instances.get(backend, None)
if app is None:
app = self.create_app(backend)
| {"golden_diff": "diff --git a/moto/server.py b/moto/server.py\n--- a/moto/server.py\n+++ b/moto/server.py\n@@ -58,9 +58,7 @@\n if re.match(url_base, 'http://%s' % host):\n return backend_name\n \n- raise RuntimeError('Invalid host: \"%s\"' % host)\n-\n- def infer_service_region(self, environ):\n+ def infer_service_region_host(self, environ):\n auth = environ.get('HTTP_AUTHORIZATION')\n if auth:\n # Signed request\n@@ -70,21 +68,35 @@\n try:\n credential_scope = auth.split(\",\")[0].split()[1]\n _, _, region, service, _ = credential_scope.split(\"/\")\n- return service, region\n except ValueError:\n # Signature format does not match, this is exceptional and we can't\n # infer a service-region. A reduced set of services still use\n # the deprecated SigV2, ergo prefer S3 as most likely default.\n # https://docs.aws.amazon.com/general/latest/gr/signature-version-2.html\n- return DEFAULT_SERVICE_REGION\n+ service, region = DEFAULT_SERVICE_REGION\n else:\n # Unsigned request\n target = environ.get('HTTP_X_AMZ_TARGET')\n if target:\n service, _ = target.split('.', 1)\n- return UNSIGNED_REQUESTS.get(service, DEFAULT_SERVICE_REGION)\n- # S3 is the last resort when the target is also unknown\n- return DEFAULT_SERVICE_REGION\n+ service, region = UNSIGNED_REQUESTS.get(service, DEFAULT_SERVICE_REGION)\n+ else:\n+ # S3 is the last resort when the target is also unknown\n+ service, region = DEFAULT_SERVICE_REGION\n+\n+ if service == 'dynamodb':\n+ if environ['HTTP_X_AMZ_TARGET'].startswith('DynamoDBStreams'):\n+ host = 'dynamodbstreams'\n+ else:\n+ dynamo_api_version = environ['HTTP_X_AMZ_TARGET'].split(\"_\")[1].split(\".\")[0]\n+ # If Newer API version, use dynamodb2\n+ if dynamo_api_version > \"20111205\":\n+ host = \"dynamodb2\"\n+ else:\n+ host = \"{service}.{region}.amazonaws.com\".format(\n+ service=service, region=region)\n+\n+ return host\n \n def get_application(self, environ):\n path_info = environ.get('PATH_INFO', '')\n@@ -101,22 +113,14 @@\n host = \"instance_metadata\"\n else:\n host = environ['HTTP_HOST'].split(':')[0]\n- if host in {'localhost', 'motoserver'} or host.startswith(\"192.168.\"):\n- service, region = self.infer_service_region(environ)\n- if service == 'dynamodb':\n- if environ['HTTP_X_AMZ_TARGET'].startswith('DynamoDBStreams'):\n- host = 'dynamodbstreams'\n- else:\n- dynamo_api_version = environ['HTTP_X_AMZ_TARGET'].split(\"_\")[1].split(\".\")[0]\n- # If Newer API version, use dynamodb2\n- if dynamo_api_version > \"20111205\":\n- host = \"dynamodb2\"\n- else:\n- host = \"{service}.{region}.amazonaws.com\".format(\n- service=service, region=region)\n \n with self.lock:\n backend = self.get_backend_for_host(host)\n+ if not backend:\n+ # No regular backend found; try parsing other headers\n+ host = self.infer_service_region_host(environ)\n+ backend = self.get_backend_for_host(host)\n+\n app = self.app_instances.get(backend, None)\n if app is None:\n app = self.create_app(backend)\n", "issue": "Add trusted hosts / networks to Server mode\nCurrently (Java) AWS API can only work with Moto from the local network (if it starts with \"192.168.X.X\") or if the request is coming from the same host as the server, that's very limiting because i want to run Moto on a container in an external network.\r\n\r\nAny chance to add trusted networks or hosts via config file or CLI args?\r\n\r\nI'd appreciate any info if this had been addressed or i'm using it wrong\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport argparse\nimport json\nimport re\nimport 
sys\nfrom threading import Lock\n\nimport six\nfrom flask import Flask\nfrom flask.testing import FlaskClient\n\nfrom six.moves.urllib.parse import urlencode\nfrom werkzeug.routing import BaseConverter\nfrom werkzeug.serving import run_simple\n\nfrom moto.backends import BACKENDS\nfrom moto.core.utils import convert_flask_to_httpretty_response\n\n\nHTTP_METHODS = [\"GET\", \"POST\", \"PUT\", \"DELETE\", \"HEAD\", \"PATCH\"]\n\n\nDEFAULT_SERVICE_REGION = ('s3', 'us-east-1')\n\n# Map of unsigned calls to service-region as per AWS API docs\n# https://docs.aws.amazon.com/cognito/latest/developerguide/resource-permissions.html#amazon-cognito-signed-versus-unsigned-apis\nUNSIGNED_REQUESTS = {\n 'AWSCognitoIdentityService': ('cognito-identity', 'us-east-1'),\n 'AWSCognitoIdentityProviderService': ('cognito-idp', 'us-east-1'),\n}\n\n\nclass DomainDispatcherApplication(object):\n \"\"\"\n Dispatch requests to different applications based on the \"Host:\" header\n value. We'll match the host header value with the url_bases of each backend.\n \"\"\"\n\n def __init__(self, create_app, service=None):\n self.create_app = create_app\n self.lock = Lock()\n self.app_instances = {}\n self.service = service\n\n def get_backend_for_host(self, host):\n if host == 'moto_api':\n return host\n\n if self.service:\n return self.service\n\n if host in BACKENDS:\n return host\n\n for backend_name, backend in BACKENDS.items():\n for url_base in list(backend.values())[0].url_bases:\n if re.match(url_base, 'http://%s' % host):\n return backend_name\n\n raise RuntimeError('Invalid host: \"%s\"' % host)\n\n def infer_service_region(self, environ):\n auth = environ.get('HTTP_AUTHORIZATION')\n if auth:\n # Signed request\n # Parse auth header to find service assuming a SigV4 request\n # https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html\n # ['Credential=sdffdsa', '20170220', 'us-east-1', 'sns', 'aws4_request']\n try:\n credential_scope = auth.split(\",\")[0].split()[1]\n _, _, region, service, _ = credential_scope.split(\"/\")\n return service, region\n except ValueError:\n # Signature format does not match, this is exceptional and we can't\n # infer a service-region. 
A reduced set of services still use\n # the deprecated SigV2, ergo prefer S3 as most likely default.\n # https://docs.aws.amazon.com/general/latest/gr/signature-version-2.html\n return DEFAULT_SERVICE_REGION\n else:\n # Unsigned request\n target = environ.get('HTTP_X_AMZ_TARGET')\n if target:\n service, _ = target.split('.', 1)\n return UNSIGNED_REQUESTS.get(service, DEFAULT_SERVICE_REGION)\n # S3 is the last resort when the target is also unknown\n return DEFAULT_SERVICE_REGION\n\n def get_application(self, environ):\n path_info = environ.get('PATH_INFO', '')\n\n # The URL path might contain non-ASCII text, for instance unicode S3 bucket names\n if six.PY2 and isinstance(path_info, str):\n path_info = six.u(path_info)\n if six.PY3 and isinstance(path_info, six.binary_type):\n path_info = path_info.decode('utf-8')\n\n if path_info.startswith(\"/moto-api\") or path_info == \"/favicon.ico\":\n host = \"moto_api\"\n elif path_info.startswith(\"/latest/meta-data/\"):\n host = \"instance_metadata\"\n else:\n host = environ['HTTP_HOST'].split(':')[0]\n if host in {'localhost', 'motoserver'} or host.startswith(\"192.168.\"):\n service, region = self.infer_service_region(environ)\n if service == 'dynamodb':\n if environ['HTTP_X_AMZ_TARGET'].startswith('DynamoDBStreams'):\n host = 'dynamodbstreams'\n else:\n dynamo_api_version = environ['HTTP_X_AMZ_TARGET'].split(\"_\")[1].split(\".\")[0]\n # If Newer API version, use dynamodb2\n if dynamo_api_version > \"20111205\":\n host = \"dynamodb2\"\n else:\n host = \"{service}.{region}.amazonaws.com\".format(\n service=service, region=region)\n\n with self.lock:\n backend = self.get_backend_for_host(host)\n app = self.app_instances.get(backend, None)\n if app is None:\n app = self.create_app(backend)\n self.app_instances[backend] = app\n return app\n\n def __call__(self, environ, start_response):\n backend_app = self.get_application(environ)\n return backend_app(environ, start_response)\n\n\nclass RegexConverter(BaseConverter):\n # http://werkzeug.pocoo.org/docs/routing/#custom-converters\n\n def __init__(self, url_map, *items):\n super(RegexConverter, self).__init__(url_map)\n self.regex = items[0]\n\n\nclass AWSTestHelper(FlaskClient):\n\n def action_data(self, action_name, **kwargs):\n \"\"\"\n Method calls resource with action_name and returns data of response.\n \"\"\"\n opts = {\"Action\": action_name}\n opts.update(kwargs)\n res = self.get(\"/?{0}\".format(urlencode(opts)),\n headers={\"Host\": \"{0}.us-east-1.amazonaws.com\".format(self.application.service)})\n return res.data.decode(\"utf-8\")\n\n def action_json(self, action_name, **kwargs):\n \"\"\"\n Method calls resource with action_name and returns object obtained via\n deserialization of output.\n \"\"\"\n return json.loads(self.action_data(action_name, **kwargs))\n\n\ndef create_backend_app(service):\n from werkzeug.routing import Map\n\n # Create the backend_app\n backend_app = Flask(__name__)\n backend_app.debug = True\n backend_app.service = service\n\n # Reset view functions to reset the app\n backend_app.view_functions = {}\n backend_app.url_map = Map()\n backend_app.url_map.converters['regex'] = RegexConverter\n backend = list(BACKENDS[service].values())[0]\n for url_path, handler in backend.flask_paths.items():\n if handler.__name__ == 'dispatch':\n endpoint = '{0}.dispatch'.format(handler.__self__.__name__)\n else:\n endpoint = None\n\n original_endpoint = endpoint\n index = 2\n while endpoint in backend_app.view_functions:\n # HACK: Sometimes we map the same view to multiple 
url_paths. Flask\n # requries us to have different names.\n endpoint = original_endpoint + str(index)\n index += 1\n\n backend_app.add_url_rule(\n url_path,\n endpoint=endpoint,\n methods=HTTP_METHODS,\n view_func=convert_flask_to_httpretty_response(handler),\n strict_slashes=False,\n )\n\n backend_app.test_client_class = AWSTestHelper\n return backend_app\n\n\ndef main(argv=sys.argv[1:]):\n parser = argparse.ArgumentParser()\n\n # Keep this for backwards compat\n parser.add_argument(\n \"service\",\n type=str,\n nargs='?', # http://stackoverflow.com/a/4480202/731592\n default=None)\n parser.add_argument(\n '-H', '--host', type=str,\n help='Which host to bind',\n default='127.0.0.1')\n parser.add_argument(\n '-p', '--port', type=int,\n help='Port number to use for connection',\n default=5000)\n parser.add_argument(\n '-r', '--reload',\n action='store_true',\n help='Reload server on a file change',\n default=False\n )\n parser.add_argument(\n '-s', '--ssl',\n action='store_true',\n help='Enable SSL encrypted connection with auto-generated certificate (use https://... URL)',\n default=False\n )\n parser.add_argument(\n '-c', '--ssl-cert', type=str,\n help='Path to SSL certificate',\n default=None)\n parser.add_argument(\n '-k', '--ssl-key', type=str,\n help='Path to SSL private key',\n default=None)\n\n args = parser.parse_args(argv)\n\n # Wrap the main application\n main_app = DomainDispatcherApplication(\n create_backend_app, service=args.service)\n main_app.debug = True\n\n ssl_context = None\n if args.ssl_key and args.ssl_cert:\n ssl_context = (args.ssl_cert, args.ssl_key)\n elif args.ssl:\n ssl_context = 'adhoc'\n\n run_simple(args.host, args.port, main_app,\n threaded=True, use_reloader=args.reload,\n ssl_context=ssl_context)\n\n\nif __name__ == '__main__':\n main()\n", "path": "moto/server.py"}]} | 3,278 | 832 |
gh_patches_debug_37710 | rasdani/github-patches | git_diff | localstack__localstack-4575 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug: State Machine references don't get resolved properly
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
Lambda refs get lost
### Expected Behavior
Lambda refs work in state machines
### How are you starting LocalStack?
With a docker-compose file
### Steps To Reproduce
#### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`)
docker run localstack/localstack
#### Client commands (e.g., AWS SDK code snippet, or sequence of "awslocal" commands)
awslocal s3 mb s3://mybucket
### Environment
```markdown
- OS:
- LocalStack:
```
### Anything else?
This is based on a conversation I had with @dominikschubert
</issue>
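As background, the "lost" Lambda references appear to be the `${...}` placeholders in a state machine's `DefinitionString` that CloudFormation normally fills in from `DefinitionSubstitutions`; the deploy template in the listing below passes `DefinitionString` through without applying them. A minimal sketch of the kind of substitution involved, mirroring the `_apply_substitutions` helper introduced in the golden diff further down (the `${HandlerArn}` name is only an illustrative placeholder):

```python
import re

def apply_substitutions(definition: str, substitutions: dict) -> str:
    # Replace each ${Token} placeholder with its value from DefinitionSubstitutions.
    for token in re.findall(r"\$\{[A-Za-z0-9_]+\}", definition):
        definition = definition.replace(token, substitutions[token[2:-1]])
    return definition

# Example (hypothetical ARN):
# apply_substitutions('{"Resource": "${HandlerArn}"}',
#                     {"HandlerArn": "arn:aws:lambda:us-east-1:000000000000:function:my-fn"})
```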
<code>
[start of localstack/utils/generic/wait_utils.py]
1 import time
2 from typing import Callable
3
4 from typing_extensions import Literal
5
6
7 def wait_until(
8 fn: Callable[[], bool],
9 wait: float = 1.0,
10 max_retries: int = 10,
11 strategy: Literal["exponential", "static", "linear"] = "exponential",
12 _retries: int = 0,
13 _max_wait: float = 240,
14 ) -> None:
15 """waits until a given condition is true, rechecking it periodically"""
16 if max_retries < _retries:
17 return
18 completed = fn()
19 if not completed:
20 if wait > _max_wait:
21 return
22 time.sleep(wait)
23 next_wait = wait # default: static
24 if strategy == "linear":
25 next_wait = (wait / _retries) * (_retries + 1)
26 elif strategy == "exponential":
27 next_wait = wait ** 2
28 wait_until(fn, next_wait, max_retries, strategy, _retries + 1, _max_wait)
29
[end of localstack/utils/generic/wait_utils.py]
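One detail worth noting in the helper above: with the default `wait=1.0`, the "exponential" branch computes `wait ** 2`, which stays at `1.0` on every retry, so the delay never grows (and for any starting wait above 1 it would grow explosively rather than double). The golden diff later in this record switches it to plain doubling; a quick illustration of the difference:

```python
squared, doubled = [], []
w = 1.0
for _ in range(4):
    w = w ** 2          # current behaviour with the default wait of 1.0
    squared.append(w)
w = 1.0
for _ in range(4):
    w = w * 2           # behaviour after the fix in the golden diff
    doubled.append(w)
print(squared)          # [1.0, 1.0, 1.0, 1.0] -> never backs off
print(doubled)          # [2.0, 4.0, 8.0, 16.0]
```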
[start of localstack/services/cloudformation/models/stepfunctions.py]
1 from localstack.services.cloudformation.deployment_utils import PLACEHOLDER_RESOURCE_NAME
2 from localstack.services.cloudformation.service_models import GenericBaseModel
3 from localstack.utils.aws import aws_stack
4
5
6 class SFNActivity(GenericBaseModel):
7 @staticmethod
8 def cloudformation_type():
9 return "AWS::StepFunctions::Activity"
10
11 def fetch_state(self, stack_name, resources):
12 activity_arn = self.physical_resource_id
13 if not activity_arn:
14 return None
15 client = aws_stack.connect_to_service("stepfunctions")
16 result = client.describe_activity(activityArn=activity_arn)
17 return result
18
19 @staticmethod
20 def get_deploy_templates():
21 return {
22 "create": {
23 "function": "create_activity",
24 "parameters": {"name": ["Name", PLACEHOLDER_RESOURCE_NAME], "tags": "Tags"},
25 },
26 "delete": {
27 "function": "delete_activity",
28 "parameters": {"activityArn": "PhysicalResourceId"},
29 },
30 }
31
32
33 class SFNStateMachine(GenericBaseModel):
34 @staticmethod
35 def cloudformation_type():
36 return "AWS::StepFunctions::StateMachine"
37
38 def get_resource_name(self):
39 return self.props.get("StateMachineName")
40
41 def get_physical_resource_id(self, attribute=None, **kwargs):
42 return self.props.get("stateMachineArn")
43
44 def fetch_state(self, stack_name, resources):
45 sm_name = self.props.get("StateMachineName") or self.resource_id
46 sm_name = self.resolve_refs_recursively(stack_name, sm_name, resources)
47 sfn_client = aws_stack.connect_to_service("stepfunctions")
48 state_machines = sfn_client.list_state_machines()["stateMachines"]
49 sm_arn = [m["stateMachineArn"] for m in state_machines if m["name"] == sm_name]
50 if not sm_arn:
51 return None
52 result = sfn_client.describe_state_machine(stateMachineArn=sm_arn[0])
53 return result
54
55 def update_resource(self, new_resource, stack_name, resources):
56 props = new_resource["Properties"]
57 client = aws_stack.connect_to_service("stepfunctions")
58 sm_arn = self.props.get("stateMachineArn")
59 if not sm_arn:
60 self.state = self.fetch_state(stack_name=stack_name, resources=resources)
61 sm_arn = self.state["stateMachineArn"]
62 kwargs = {
63 "stateMachineArn": sm_arn,
64 "definition": props["DefinitionString"],
65 }
66 return client.update_state_machine(**kwargs)
67
68 @staticmethod
69 def get_deploy_templates():
70 return {
71 "create": {
72 "function": "create_state_machine",
73 "parameters": {
74 "name": ["StateMachineName", PLACEHOLDER_RESOURCE_NAME],
75 "definition": "DefinitionString",
76 "roleArn": "RoleArn",
77 },
78 },
79 "delete": {
80 "function": "delete_state_machine",
81 "parameters": {"stateMachineArn": "PhysicalResourceId"},
82 },
83 }
84
[end of localstack/services/cloudformation/models/stepfunctions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/localstack/services/cloudformation/models/stepfunctions.py b/localstack/services/cloudformation/models/stepfunctions.py
--- a/localstack/services/cloudformation/models/stepfunctions.py
+++ b/localstack/services/cloudformation/models/stepfunctions.py
@@ -1,3 +1,6 @@
+import re
+from typing import Dict
+
from localstack.services.cloudformation.deployment_utils import PLACEHOLDER_RESOURCE_NAME
from localstack.services.cloudformation.service_models import GenericBaseModel
from localstack.utils.aws import aws_stack
@@ -65,19 +68,43 @@
}
return client.update_state_machine(**kwargs)
- @staticmethod
- def get_deploy_templates():
+ @classmethod
+ def get_deploy_templates(cls):
+ def _create_params(params, **kwargs):
+ def _get_definition(params):
+ definition_str = params.get("DefinitionString")
+ substitutions = params.get("DefinitionSubstitutions")
+ if substitutions is not None:
+ definition_str = _apply_substitutions(definition_str, substitutions)
+ return definition_str
+
+ return {
+ "name": params.get("StateMachineName", PLACEHOLDER_RESOURCE_NAME),
+ "definition": _get_definition(params),
+ "roleArn": params.get("RoleArn"),
+ "type": params.get("StateMachineTyp", None),
+ }
+
return {
"create": {
"function": "create_state_machine",
- "parameters": {
- "name": ["StateMachineName", PLACEHOLDER_RESOURCE_NAME],
- "definition": "DefinitionString",
- "roleArn": "RoleArn",
- },
+ "parameters": _create_params,
},
"delete": {
"function": "delete_state_machine",
"parameters": {"stateMachineArn": "PhysicalResourceId"},
},
}
+
+
+def _apply_substitutions(definition: str, substitutions: Dict[str, str]) -> str:
+ substitution_regex = re.compile("\\${[a-zA-Z0-9_]+}") # might be a bit too strict in some cases
+ tokens = substitution_regex.findall(definition)
+ result = definition
+ for token in tokens:
+ raw_token = token[2:-1] # strip ${ and }
+ if raw_token not in substitutions.keys():
+ raise
+ result = result.replace(token, substitutions[raw_token])
+
+ return result
diff --git a/localstack/utils/generic/wait_utils.py b/localstack/utils/generic/wait_utils.py
--- a/localstack/utils/generic/wait_utils.py
+++ b/localstack/utils/generic/wait_utils.py
@@ -24,5 +24,5 @@
if strategy == "linear":
next_wait = (wait / _retries) * (_retries + 1)
elif strategy == "exponential":
- next_wait = wait ** 2
+ next_wait = wait * 2
wait_until(fn, next_wait, max_retries, strategy, _retries + 1, _max_wait)
| {"golden_diff": "diff --git a/localstack/services/cloudformation/models/stepfunctions.py b/localstack/services/cloudformation/models/stepfunctions.py\n--- a/localstack/services/cloudformation/models/stepfunctions.py\n+++ b/localstack/services/cloudformation/models/stepfunctions.py\n@@ -1,3 +1,6 @@\n+import re\n+from typing import Dict\n+\n from localstack.services.cloudformation.deployment_utils import PLACEHOLDER_RESOURCE_NAME\n from localstack.services.cloudformation.service_models import GenericBaseModel\n from localstack.utils.aws import aws_stack\n@@ -65,19 +68,43 @@\n }\n return client.update_state_machine(**kwargs)\n \n- @staticmethod\n- def get_deploy_templates():\n+ @classmethod\n+ def get_deploy_templates(cls):\n+ def _create_params(params, **kwargs):\n+ def _get_definition(params):\n+ definition_str = params.get(\"DefinitionString\")\n+ substitutions = params.get(\"DefinitionSubstitutions\")\n+ if substitutions is not None:\n+ definition_str = _apply_substitutions(definition_str, substitutions)\n+ return definition_str\n+\n+ return {\n+ \"name\": params.get(\"StateMachineName\", PLACEHOLDER_RESOURCE_NAME),\n+ \"definition\": _get_definition(params),\n+ \"roleArn\": params.get(\"RoleArn\"),\n+ \"type\": params.get(\"StateMachineTyp\", None),\n+ }\n+\n return {\n \"create\": {\n \"function\": \"create_state_machine\",\n- \"parameters\": {\n- \"name\": [\"StateMachineName\", PLACEHOLDER_RESOURCE_NAME],\n- \"definition\": \"DefinitionString\",\n- \"roleArn\": \"RoleArn\",\n- },\n+ \"parameters\": _create_params,\n },\n \"delete\": {\n \"function\": \"delete_state_machine\",\n \"parameters\": {\"stateMachineArn\": \"PhysicalResourceId\"},\n },\n }\n+\n+\n+def _apply_substitutions(definition: str, substitutions: Dict[str, str]) -> str:\n+ substitution_regex = re.compile(\"\\\\${[a-zA-Z0-9_]+}\") # might be a bit too strict in some cases\n+ tokens = substitution_regex.findall(definition)\n+ result = definition\n+ for token in tokens:\n+ raw_token = token[2:-1] # strip ${ and }\n+ if raw_token not in substitutions.keys():\n+ raise\n+ result = result.replace(token, substitutions[raw_token])\n+\n+ return result\ndiff --git a/localstack/utils/generic/wait_utils.py b/localstack/utils/generic/wait_utils.py\n--- a/localstack/utils/generic/wait_utils.py\n+++ b/localstack/utils/generic/wait_utils.py\n@@ -24,5 +24,5 @@\n if strategy == \"linear\":\n next_wait = (wait / _retries) * (_retries + 1)\n elif strategy == \"exponential\":\n- next_wait = wait ** 2\n+ next_wait = wait * 2\n wait_until(fn, next_wait, max_retries, strategy, _retries + 1, _max_wait)\n", "issue": "bug: State Machine references don't get resolved properly\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Current Behavior\n\nLambda refs get lost\n\n### Expected Behavior\n\nLambda refs work in state machines\n\n### How are you starting LocalStack?\n\nWith a docker-compose file\n\n### Steps To Reproduce\n\n#### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`)\r\n\r\n docker run localstack/localstack\r\n\r\n#### Client commands (e.g., AWS SDK code snippet, or sequence of \"awslocal\" commands)\r\n\r\n awslocal s3 mb s3://mybucket\r\n\n\n### Environment\n\n```markdown\n- OS: \r\n- LocalStack:\n```\n\n\n### Anything else?\n\nThis is based on a conversation I had with @dominikschubert \n", "before_files": [{"content": "import time\nfrom typing import Callable\n\nfrom typing_extensions import Literal\n\n\ndef wait_until(\n fn: 
Callable[[], bool],\n wait: float = 1.0,\n max_retries: int = 10,\n strategy: Literal[\"exponential\", \"static\", \"linear\"] = \"exponential\",\n _retries: int = 0,\n _max_wait: float = 240,\n) -> None:\n \"\"\"waits until a given condition is true, rechecking it periodically\"\"\"\n if max_retries < _retries:\n return\n completed = fn()\n if not completed:\n if wait > _max_wait:\n return\n time.sleep(wait)\n next_wait = wait # default: static\n if strategy == \"linear\":\n next_wait = (wait / _retries) * (_retries + 1)\n elif strategy == \"exponential\":\n next_wait = wait ** 2\n wait_until(fn, next_wait, max_retries, strategy, _retries + 1, _max_wait)\n", "path": "localstack/utils/generic/wait_utils.py"}, {"content": "from localstack.services.cloudformation.deployment_utils import PLACEHOLDER_RESOURCE_NAME\nfrom localstack.services.cloudformation.service_models import GenericBaseModel\nfrom localstack.utils.aws import aws_stack\n\n\nclass SFNActivity(GenericBaseModel):\n @staticmethod\n def cloudformation_type():\n return \"AWS::StepFunctions::Activity\"\n\n def fetch_state(self, stack_name, resources):\n activity_arn = self.physical_resource_id\n if not activity_arn:\n return None\n client = aws_stack.connect_to_service(\"stepfunctions\")\n result = client.describe_activity(activityArn=activity_arn)\n return result\n\n @staticmethod\n def get_deploy_templates():\n return {\n \"create\": {\n \"function\": \"create_activity\",\n \"parameters\": {\"name\": [\"Name\", PLACEHOLDER_RESOURCE_NAME], \"tags\": \"Tags\"},\n },\n \"delete\": {\n \"function\": \"delete_activity\",\n \"parameters\": {\"activityArn\": \"PhysicalResourceId\"},\n },\n }\n\n\nclass SFNStateMachine(GenericBaseModel):\n @staticmethod\n def cloudformation_type():\n return \"AWS::StepFunctions::StateMachine\"\n\n def get_resource_name(self):\n return self.props.get(\"StateMachineName\")\n\n def get_physical_resource_id(self, attribute=None, **kwargs):\n return self.props.get(\"stateMachineArn\")\n\n def fetch_state(self, stack_name, resources):\n sm_name = self.props.get(\"StateMachineName\") or self.resource_id\n sm_name = self.resolve_refs_recursively(stack_name, sm_name, resources)\n sfn_client = aws_stack.connect_to_service(\"stepfunctions\")\n state_machines = sfn_client.list_state_machines()[\"stateMachines\"]\n sm_arn = [m[\"stateMachineArn\"] for m in state_machines if m[\"name\"] == sm_name]\n if not sm_arn:\n return None\n result = sfn_client.describe_state_machine(stateMachineArn=sm_arn[0])\n return result\n\n def update_resource(self, new_resource, stack_name, resources):\n props = new_resource[\"Properties\"]\n client = aws_stack.connect_to_service(\"stepfunctions\")\n sm_arn = self.props.get(\"stateMachineArn\")\n if not sm_arn:\n self.state = self.fetch_state(stack_name=stack_name, resources=resources)\n sm_arn = self.state[\"stateMachineArn\"]\n kwargs = {\n \"stateMachineArn\": sm_arn,\n \"definition\": props[\"DefinitionString\"],\n }\n return client.update_state_machine(**kwargs)\n\n @staticmethod\n def get_deploy_templates():\n return {\n \"create\": {\n \"function\": \"create_state_machine\",\n \"parameters\": {\n \"name\": [\"StateMachineName\", PLACEHOLDER_RESOURCE_NAME],\n \"definition\": \"DefinitionString\",\n \"roleArn\": \"RoleArn\",\n },\n },\n \"delete\": {\n \"function\": \"delete_state_machine\",\n \"parameters\": {\"stateMachineArn\": \"PhysicalResourceId\"},\n },\n }\n", "path": "localstack/services/cloudformation/models/stepfunctions.py"}]} | 1,818 | 654 |
gh_patches_debug_32187 | rasdani/github-patches | git_diff | beeware__toga-1307 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Toga slider widget on Android has one fewer tick than on Windows
**Describe the bug**
The toga.Slider widget, when defined with range=(2,20) and tick_count=19, instantiates a slider on Windows that has 19 tick values.
However, when the same code is built and run for Android (both in the emulator and on an actual Android phone), the slider only has 18 tick values.
In addition, the property Slider.value returns a value that is scaled by 18/19 per tick.
**To Reproduce**
I wrote a simple example which demonstrates the bug clearly. I built and compiled it like this:
briefcase update android
briefcase build android
briefcase run android
Here is the source code from app.py.
import toga
from toga.style import Pack
from toga.style.pack import COLUMN, ROW
TIMER_MIN=1
TIMER_MAX=19
class slidetest(toga.App):
    def startup(self):
        main_box = toga.Box()
        self.timer_label=toga.Label(text="Counter: ")
        self.timer_label.style.width=200
        self.timer_label.style.padding=5
        self.timer_slide=toga.Slider(range=(TIMER_MIN,TIMER_MAX),tick_count=(TIMER_MAX-TIMER_MIN+1),on_change=(self.timer_change),
                                     on_release=(self.timer_set))
        self.timer_slide.style.padding=5
        self.timer_slide.style.padding_top=5
        self.timer_slide.style.width=200
        self.timer_slide.value=10.0
        main_box.add(self.timer_slide)
        main_box.add(self.timer_label)
        self.main_window = toga.MainWindow(title=self.formal_name)
        self.main_window.content = main_box
        self.main_window.show()
    def timer_change(self,widget):
        self.timer_label.text="Counter: "+str(self.timer_slide.tick_value)
    def timer_set(self,widget):
        pass
def main():
    return slidetest()
**Expected behavior**
This should produce a slider with 19 tick stops, and return values from 2.0 to 20.0.
The Android build of this code instead has a slider with 18 tick stops and returns values from 2.0 to 19.05263.
The screenshots below show captures of the Android emulator with the slider at the full-left and full-right positions; the resulting tick_value is 1 and 18, respectively.
**Screenshots**


**Environment:**
- Operating System: Windows
- Python version:3.8
- Software versions:
- Briefcase: briefcase 0.3.5
- Toga:
- toga-core 0.3.0.dev27
- toga-winforms 0.3.0.dev27
arrow 1.1.0
binaryornot 0.4.4
certifi 2021.5.30
chardet 4.0.0
click 8.0.1
colorama 0.4.4
cookiecutter 1.7.3
gitdb 4.0.7
GitPython 3.1.17
idna 2.10
Jinja2 2.11.3
jinja2-time 0.2.0
MarkupSafe 2.0.1
oauthlib 3.1.1
pip 21.1.1
poyo 0.5.0
pycparser 2.20
python-dateutil 2.8.1
python-slugify 5.0.2
pythonnet 2.5.2
setuptools 56.0.0
six 1.16.0
smmap 4.0.0
text-unidecode 1.3
toml 0.10.2
travertino 0.1.3
urllib3 1.26.5
wheel 0.36.2
**Additional context**
Add any other context about the problem here.
</issue>
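The numbers in this report are consistent with an off-by-one in how the Android backend converts a SeekBar position into a slider value: with `tick_count` ticks there are only `tick_count - 1` intervals, but `get_value()` in the listing below divides by `tick_count`, which produces exactly the reported 18/19 scaling. A sketch of the intended mapping, for illustration only (the authoritative change is the golden diff at the end of this record):

```python
def progress_to_value(progress: int, minimum: float, maximum: float, tick_count: int) -> float:
    """Map an Android SeekBar position (0 .. tick_count - 1) to a Toga slider value."""
    intervals = tick_count - 1                     # 19 ticks -> 18 intervals
    return minimum + progress * (maximum - minimum) / intervals

# With range=(2, 20) and tick_count=19, at the top position (progress=18):
#   buggy scaling:     2 + 18 * (20 - 2) / 19 = 19.0526...  (the value reported above)
#   corrected mapping: 2 + 18 * (20 - 2) / 18 = 20.0
```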
<code>
[start of src/android/toga_android/widgets/slider.py]
1 from travertino.size import at_least
2
3 from ..libs.android.view import View__MeasureSpec
4 from ..libs.android.widget import SeekBar, SeekBar__OnSeekBarChangeListener
5 from .base import Widget
6
7
8 class TogaOnSeekBarChangeListener(SeekBar__OnSeekBarChangeListener):
9 def __init__(self, impl):
10 super().__init__()
11 self.impl = impl
12
13 def onProgressChanged(self, _view, _progress, _from_user):
14 if self.impl.interface.on_change:
15 self.impl.interface.on_change(widget=self.impl.interface)
16
17 # Add two unused methods so that the Java interface is completely implemented.
18 def onStartTrackingTouch(self, native_seekbar):
19 pass
20
21 def onStopTrackingTouch(self, native_seekbar):
22 pass
23
24
25 # Since Android's SeekBar is always discrete,
26 # use a high number of steps for a "continuous" slider.
27 DEFAULT_NUMBER_OF_TICKS = 10000
28
29
30 class Slider(Widget):
31 def create(self):
32 self.native = SeekBar(self._native_activity)
33 self.native.setMax(DEFAULT_NUMBER_OF_TICKS)
34 self.native.setOnSeekBarChangeListener(TogaOnSeekBarChangeListener(self))
35
36 def get_value(self):
37 minimum, maximum = self.interface.range
38 n_steps = self.interface.tick_count
39 if n_steps is None:
40 n_steps = DEFAULT_NUMBER_OF_TICKS
41 return (self.native.getProgress() * (maximum - minimum) / n_steps) + minimum
42
43 def set_value(self, value):
44 minimum, maximum = self.interface.range
45 n_steps = self.interface.tick_count
46 if n_steps is None:
47 n_steps = DEFAULT_NUMBER_OF_TICKS
48 self.native.setProgress(int((maximum - value - minimum) * n_steps))
49
50 def set_range(self, range):
51 pass
52
53 def set_tick_count(self, tick_count):
54 if tick_count is None:
55 self.native.setMax(DEFAULT_NUMBER_OF_TICKS)
56 else:
57 self.native.setMax(int(tick_count) - 1)
58
59 def rehint(self):
60 self.native.measure(
61 View__MeasureSpec.UNSPECIFIED, View__MeasureSpec.UNSPECIFIED
62 )
63 self.interface.intrinsic.width = at_least(self.native.getMeasuredWidth())
64 self.interface.intrinsic.height = self.native.getMeasuredHeight()
65
66 def set_on_change(self, handler):
67 # No special handling required
68 pass
69
70 def set_on_press(self, handler):
71 self.interface.factory.not_implemented("Slider.set_on_press()")
72
73 def set_on_release(self, handler):
74 self.interface.factory.not_implemented("Slider.set_on_release()")
75
[end of src/android/toga_android/widgets/slider.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/android/toga_android/widgets/slider.py b/src/android/toga_android/widgets/slider.py
--- a/src/android/toga_android/widgets/slider.py
+++ b/src/android/toga_android/widgets/slider.py
@@ -35,26 +35,37 @@
def get_value(self):
minimum, maximum = self.interface.range
- n_steps = self.interface.tick_count
- if n_steps is None:
- n_steps = DEFAULT_NUMBER_OF_TICKS
- return (self.native.getProgress() * (maximum - minimum) / n_steps) + minimum
+ if self.interface.tick_count is not None and self.interface.tick_count <= 1:
+ return minimum
+ toga_tick_count = self.interface.tick_count or DEFAULT_NUMBER_OF_TICKS
+ android_slider_max = toga_tick_count - 1
+ tick_factor = (maximum - minimum) / android_slider_max
+ progress_scaled = self.native.getProgress() * tick_factor
+ result = progress_scaled + minimum
+ return result
def set_value(self, value):
minimum, maximum = self.interface.range
- n_steps = self.interface.tick_count
- if n_steps is None:
- n_steps = DEFAULT_NUMBER_OF_TICKS
- self.native.setProgress(int((maximum - value - minimum) * n_steps))
+ if self.interface.tick_count is not None and self.interface.tick_count <= 1:
+ android_progress = 0
+ else:
+ toga_tick_count = self.interface.tick_count or DEFAULT_NUMBER_OF_TICKS
+ android_slider_max = toga_tick_count - 1
+ tick_factor = (maximum - minimum) / android_slider_max
+ android_progress = int((value - minimum) * tick_factor)
+ self.native.setProgress(android_progress)
def set_range(self, range):
pass
def set_tick_count(self, tick_count):
- if tick_count is None:
- self.native.setMax(DEFAULT_NUMBER_OF_TICKS)
+ # Since the Android slider slides from 0 to max inclusive, always subtract 1 from tick_count.
+ if self.interface.tick_count is None:
+ android_slider_max = DEFAULT_NUMBER_OF_TICKS - 1
else:
- self.native.setMax(int(tick_count) - 1)
+ android_slider_max = int(self.interface.tick_count - 1)
+ # Set the Android SeekBar max, clamping so it's non-negative.
+ self.native.setMax(max(0, android_slider_max))
def rehint(self):
self.native.measure(
| {"golden_diff": "diff --git a/src/android/toga_android/widgets/slider.py b/src/android/toga_android/widgets/slider.py\n--- a/src/android/toga_android/widgets/slider.py\n+++ b/src/android/toga_android/widgets/slider.py\n@@ -35,26 +35,37 @@\n \n def get_value(self):\n minimum, maximum = self.interface.range\n- n_steps = self.interface.tick_count\n- if n_steps is None:\n- n_steps = DEFAULT_NUMBER_OF_TICKS\n- return (self.native.getProgress() * (maximum - minimum) / n_steps) + minimum\n+ if self.interface.tick_count is not None and self.interface.tick_count <= 1:\n+ return minimum\n+ toga_tick_count = self.interface.tick_count or DEFAULT_NUMBER_OF_TICKS\n+ android_slider_max = toga_tick_count - 1\n+ tick_factor = (maximum - minimum) / android_slider_max\n+ progress_scaled = self.native.getProgress() * tick_factor\n+ result = progress_scaled + minimum\n+ return result\n \n def set_value(self, value):\n minimum, maximum = self.interface.range\n- n_steps = self.interface.tick_count\n- if n_steps is None:\n- n_steps = DEFAULT_NUMBER_OF_TICKS\n- self.native.setProgress(int((maximum - value - minimum) * n_steps))\n+ if self.interface.tick_count is not None and self.interface.tick_count <= 1:\n+ android_progress = 0\n+ else:\n+ toga_tick_count = self.interface.tick_count or DEFAULT_NUMBER_OF_TICKS\n+ android_slider_max = toga_tick_count - 1\n+ tick_factor = (maximum - minimum) / android_slider_max\n+ android_progress = int((value - minimum) * tick_factor)\n+ self.native.setProgress(android_progress)\n \n def set_range(self, range):\n pass\n \n def set_tick_count(self, tick_count):\n- if tick_count is None:\n- self.native.setMax(DEFAULT_NUMBER_OF_TICKS)\n+ # Since the Android slider slides from 0 to max inclusive, always subtract 1 from tick_count.\n+ if self.interface.tick_count is None:\n+ android_slider_max = DEFAULT_NUMBER_OF_TICKS - 1\n else:\n- self.native.setMax(int(tick_count) - 1)\n+ android_slider_max = int(self.interface.tick_count - 1)\n+ # Set the Android SeekBar max, clamping so it's non-negative.\n+ self.native.setMax(max(0, android_slider_max))\n \n def rehint(self):\n self.native.measure(\n", "issue": "Toga slider widget in android has 1 fewer tick counts than in Windows\n**Describe the bug**\r\nThe toga.Slider widget when defined with range=(2,20), tick_count=19 instantiates a slider in Windows which has 19 tick values.\r\nHowever when the same code is built and run for Android (both in the emulator and an actual Android phone), the slider only\r\nhas 18 tick values. In addition ,the property Slider.value returns a value which is 18/19 per tick.\r\n\r\n**To Reproduce**\r\nI wrote a simple example which demonstrates the bug clearly. 
I build and compiled like this:\r\n briefcase update android\r\n briefcase build android\r\n briefcase run android\r\n\r\nHere is the source code from app.py.\r\n\r\nimport toga\r\nfrom toga.style import Pack\r\nfrom toga.style.pack import COLUMN, ROW\r\n\r\nTIMER_MIN=1\r\nTIMER_MAX=19\r\n\r\nclass slidetest(toga.App):\r\n\r\n def startup(self):\r\n main_box = toga.Box()\r\n\r\n self.timer_label=toga.Label(text=\"Counter: \")\r\n self.timer_label.style.width=200\r\n self.timer_label.style.padding=5\r\n\r\n self.timer_slide=toga.Slider(range=(TIMER_MIN,TIMER_MAX),tick_count=(TIMER_MAX-TIMER_MIN+1),on_change=(self.timer_change),\r\n on_release=(self.timer_set))\r\n self.timer_slide.style.padding=5\r\n self.timer_slide.style.padding_top=5\r\n self.timer_slide.style.width=200\r\n self.timer_slide.value=10.0\r\n\r\n main_box.add(self.timer_slide)\r\n main_box.add(self.timer_label)\r\n\r\n self.main_window = toga.MainWindow(title=self.formal_name)\r\n self.main_window.content = main_box\r\n self.main_window.show()\r\n\r\n def timer_change(self,widget):\r\n self.timer_label.text=\"Counter: \"+str(self.timer_slide.tick_value)\r\n\r\n def timer_set(self,widget):\r\n pass\r\n\r\ndef main():\r\n return slidetest()\r\n\r\n\r\n**Expected behavior**\r\nThis should produce a slider with 19 tick stops, and return values from 2.0 to 20.0. \r\n\r\nThe android build of this code has a slide with 18 tick stops and return values from 2.0 to 19.05263.\r\n\r\nScreenshots below just show screen capture of the android emulator with slider full left and full right position. You can see the resulting tick_value is 1 and 18 respectively.\r\n\r\n**Screenshots**\r\n\r\n\r\n\r\n\r\n**Environment:**\r\n - Operating System: Windows\r\n - Python version:3.8\r\n - Software versions:\r\n - Briefcase: briefcase 0.3.5\r\n - Toga:\r\n - toga-core 0.3.0.dev27\r\n - toga-winforms 0.3.0.dev27\r\n \r\narrow 1.1.0\r\nbinaryornot 0.4.4\r\ncertifi 2021.5.30\r\nchardet 4.0.0\r\nclick 8.0.1\r\ncolorama 0.4.4\r\ncookiecutter 1.7.3\r\ngitdb 4.0.7\r\nGitPython 3.1.17\r\nidna 2.10\r\nJinja2 2.11.3\r\njinja2-time 0.2.0\r\nMarkupSafe 2.0.1\r\noauthlib 3.1.1\r\npip 21.1.1\r\npoyo 0.5.0\r\npycparser 2.20\r\npython-dateutil 2.8.1\r\npython-slugify 5.0.2\r\npythonnet 2.5.2\r\nsetuptools 56.0.0\r\nsix 1.16.0\r\nsmmap 4.0.0\r\ntext-unidecode 1.3\r\ntoml 0.10.2\r\ntravertino 0.1.3\r\nurllib3 1.26.5\r\nwheel 0.36.2\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\nToga slider widget in android has 1 fewer tick counts than in Windows\n**Describe the bug**\r\nThe toga.Slider widget when defined with range=(2,20), tick_count=19 instantiates a slider in Windows which has 19 tick values.\r\nHowever when the same code is built and run for Android (both in the emulator and an actual Android phone), the slider only\r\nhas 18 tick values. In addition ,the property Slider.value returns a value which is 18/19 per tick.\r\n\r\n**To Reproduce**\r\nI wrote a simple example which demonstrates the bug clearly. 
I build and compiled like this:\r\n briefcase update android\r\n briefcase build android\r\n briefcase run android\r\n\r\nHere is the source code from app.py.\r\n\r\nimport toga\r\nfrom toga.style import Pack\r\nfrom toga.style.pack import COLUMN, ROW\r\n\r\nTIMER_MIN=1\r\nTIMER_MAX=19\r\n\r\nclass slidetest(toga.App):\r\n\r\n def startup(self):\r\n main_box = toga.Box()\r\n\r\n self.timer_label=toga.Label(text=\"Counter: \")\r\n self.timer_label.style.width=200\r\n self.timer_label.style.padding=5\r\n\r\n self.timer_slide=toga.Slider(range=(TIMER_MIN,TIMER_MAX),tick_count=(TIMER_MAX-TIMER_MIN+1),on_change=(self.timer_change),\r\n on_release=(self.timer_set))\r\n self.timer_slide.style.padding=5\r\n self.timer_slide.style.padding_top=5\r\n self.timer_slide.style.width=200\r\n self.timer_slide.value=10.0\r\n\r\n main_box.add(self.timer_slide)\r\n main_box.add(self.timer_label)\r\n\r\n self.main_window = toga.MainWindow(title=self.formal_name)\r\n self.main_window.content = main_box\r\n self.main_window.show()\r\n\r\n def timer_change(self,widget):\r\n self.timer_label.text=\"Counter: \"+str(self.timer_slide.tick_value)\r\n\r\n def timer_set(self,widget):\r\n pass\r\n\r\ndef main():\r\n return slidetest()\r\n\r\n\r\n**Expected behavior**\r\nThis should produce a slider with 19 tick stops, and return values from 2.0 to 20.0. \r\n\r\nThe android build of this code has a slide with 18 tick stops and return values from 2.0 to 19.05263.\r\n\r\nScreenshots below just show screen capture of the android emulator with slider full left and full right position. You can see the resulting tick_value is 1 and 18 respectively.\r\n\r\n**Screenshots**\r\n\r\n\r\n\r\n\r\n**Environment:**\r\n - Operating System: Windows\r\n - Python version:3.8\r\n - Software versions:\r\n - Briefcase: briefcase 0.3.5\r\n - Toga:\r\n - toga-core 0.3.0.dev27\r\n - toga-winforms 0.3.0.dev27\r\n \r\narrow 1.1.0\r\nbinaryornot 0.4.4\r\ncertifi 2021.5.30\r\nchardet 4.0.0\r\nclick 8.0.1\r\ncolorama 0.4.4\r\ncookiecutter 1.7.3\r\ngitdb 4.0.7\r\nGitPython 3.1.17\r\nidna 2.10\r\nJinja2 2.11.3\r\njinja2-time 0.2.0\r\nMarkupSafe 2.0.1\r\noauthlib 3.1.1\r\npip 21.1.1\r\npoyo 0.5.0\r\npycparser 2.20\r\npython-dateutil 2.8.1\r\npython-slugify 5.0.2\r\npythonnet 2.5.2\r\nsetuptools 56.0.0\r\nsix 1.16.0\r\nsmmap 4.0.0\r\ntext-unidecode 1.3\r\ntoml 0.10.2\r\ntravertino 0.1.3\r\nurllib3 1.26.5\r\nwheel 0.36.2\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "from travertino.size import at_least\n\nfrom ..libs.android.view import View__MeasureSpec\nfrom ..libs.android.widget import SeekBar, SeekBar__OnSeekBarChangeListener\nfrom .base import Widget\n\n\nclass TogaOnSeekBarChangeListener(SeekBar__OnSeekBarChangeListener):\n def __init__(self, impl):\n super().__init__()\n self.impl = impl\n\n def onProgressChanged(self, _view, _progress, _from_user):\n if self.impl.interface.on_change:\n self.impl.interface.on_change(widget=self.impl.interface)\n\n # Add two unused methods so that the Java interface is completely implemented.\n def onStartTrackingTouch(self, native_seekbar):\n pass\n\n def onStopTrackingTouch(self, native_seekbar):\n pass\n\n\n# Since Android's SeekBar is always discrete,\n# use a high number of steps for a \"continuous\" slider.\nDEFAULT_NUMBER_OF_TICKS = 10000\n\n\nclass Slider(Widget):\n def create(self):\n self.native = SeekBar(self._native_activity)\n self.native.setMax(DEFAULT_NUMBER_OF_TICKS)\n 
self.native.setOnSeekBarChangeListener(TogaOnSeekBarChangeListener(self))\n\n def get_value(self):\n minimum, maximum = self.interface.range\n n_steps = self.interface.tick_count\n if n_steps is None:\n n_steps = DEFAULT_NUMBER_OF_TICKS\n return (self.native.getProgress() * (maximum - minimum) / n_steps) + minimum\n\n def set_value(self, value):\n minimum, maximum = self.interface.range\n n_steps = self.interface.tick_count\n if n_steps is None:\n n_steps = DEFAULT_NUMBER_OF_TICKS\n self.native.setProgress(int((maximum - value - minimum) * n_steps))\n\n def set_range(self, range):\n pass\n\n def set_tick_count(self, tick_count):\n if tick_count is None:\n self.native.setMax(DEFAULT_NUMBER_OF_TICKS)\n else:\n self.native.setMax(int(tick_count) - 1)\n\n def rehint(self):\n self.native.measure(\n View__MeasureSpec.UNSPECIFIED, View__MeasureSpec.UNSPECIFIED\n )\n self.interface.intrinsic.width = at_least(self.native.getMeasuredWidth())\n self.interface.intrinsic.height = self.native.getMeasuredHeight()\n\n def set_on_change(self, handler):\n # No special handling required\n pass\n\n def set_on_press(self, handler):\n self.interface.factory.not_implemented(\"Slider.set_on_press()\")\n\n def set_on_release(self, handler):\n self.interface.factory.not_implemented(\"Slider.set_on_release()\")\n", "path": "src/android/toga_android/widgets/slider.py"}]} | 3,366 | 552 |
gh_patches_debug_19491 | rasdani/github-patches | git_diff | sunpy__sunpy-5493 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix CROTA keyword in EUI maps
Currently EUI maps have a `CROTA` keyword, which by the FITS standard should really be a `CROTA2` keyword. This results in the warning
```python
/home/docs/checkouts/readthedocs.org/user_builds/solar-orbiter-python/envs/latest/lib/python3.8/site-packages/astropy/wcs/wcs.py:482: FITSFixedWarning: CROTA = 2.486914995997215 / [deg] rotation angle
keyword looks very much like CROTAn but isn't.
wcsprm = _wcs.Wcsprm(header=header_bytes, key=key,
```
It would be good to
- Check if CROTA is in the header and CROTA2 isn't
- If so, rename the CROTA keyword to CROTA2
</issue>
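A rough sketch of the header fix described in the issue above, assuming the map metadata behaves like a plain dict (the function name here is an editorial assumption, not sunpy API):

```python
# Illustrative only: rename a lone CROTA keyword to the standard CROTA2.
def fix_crota(meta):
    if 'CROTA' in meta and 'CROTA2' not in meta:
        meta['CROTA2'] = meta.pop('CROTA')
    return meta
```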
<code>
[start of sunpy/map/sources/solo.py]
1 """
2 Solar Orbiter Map subclass definitions.
3 """
4 import astropy.units as u
5 from astropy.coordinates import CartesianRepresentation
6 from astropy.visualization import ImageNormalize, LinearStretch
7
8 from sunpy.coordinates import HeliocentricInertial
9 from sunpy.map import GenericMap
10 from sunpy.map.sources.source_type import source_stretch
11 from sunpy.time import parse_time
12
13 __all__ = ['EUIMap']
14
15
16 class EUIMap(GenericMap):
17 """
18 EUI Image Map
19
20 The Extreme Ultraviolet Imager (EUI) is a remote sensing instrument onboard the
21 Solar Orbiter (SolO) spacecraft. EUI has three telescopes that image the Sun in
22 Lyman-alpha (1216 Å) and the EUV (174 Å and 304 Å). The three telescopes are the
23 Full Sun Imager (FSI) and two High Resolution Imagers (HRI). The FSI images the
24 whole Sun in both 174 Å and 304 Å. The EUV and Lyman-alpha HRI telescopes image a
25 1000"-by-1000" patch in 174 Å and 1216 Å, respectively.
26
27 References
28 ----------
29 * `Solar Orbiter Mission Page <https://sci.esa.int/web/solar-orbiter/>`__
30 * `EUI Instrument Page <https://wwwbis.sidc.be/EUI/EUI/EUI/EUI/EUI/>`__
31 * `Instrument Paper <https://doi.org/10.1051/0004-6361/201936663>`__
32 """
33
34 def __init__(self, data, header, **kwargs):
35 super().__init__(data, header, **kwargs)
36 self._nickname = self.detector
37 self.plot_settings['cmap'] = self._get_cmap_name()
38 self.plot_settings['norm'] = ImageNormalize(
39 stretch=source_stretch(self.meta, LinearStretch()), clip=False)
40
41 @property
42 def processing_level(self):
43 if self.meta.get('level'):
44 # The level number is prepended by the letter L
45 return int(self.meta.get('level')[1:])
46
47 @property
48 def exposure_time(self):
49 return self.meta.get('xposure', 0.0) * self.timeunit
50
51 @property
52 def date(self):
53 t = self.meta.get('date-avg')
54 timesys = self.meta.get('timesys')
55 return parse_time(t, scale=timesys.lower())
56
57 @property
58 def _supported_observer_coordinates(self):
59 return [(('hcix_obs', 'hciy_obs', 'hciz_obs'),
60 {'x': self.meta.get('hcix_obs'),
61 'y': self.meta.get('hciy_obs'),
62 'z': self.meta.get('hciz_obs'),
63 'unit': u.m,
64 'representation_type': CartesianRepresentation,
65 'frame': HeliocentricInertial})] + super()._supported_observer_coordinates
66
67 @classmethod
68 def is_datasource_for(cls, data, header, **kwargs):
69 """Determines if header corresponds to an EUI image"""
70 is_solo = 'solar orbiter' in str(header.get('obsrvtry', '')).lower()
71 is_eui = str(header.get('instrume', '')).startswith('EUI')
72 return is_solo and is_eui
73
[end of sunpy/map/sources/solo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sunpy/map/sources/solo.py b/sunpy/map/sources/solo.py
--- a/sunpy/map/sources/solo.py
+++ b/sunpy/map/sources/solo.py
@@ -5,6 +5,7 @@
from astropy.coordinates import CartesianRepresentation
from astropy.visualization import ImageNormalize, LinearStretch
+from sunpy import log
from sunpy.coordinates import HeliocentricInertial
from sunpy.map import GenericMap
from sunpy.map.sources.source_type import source_stretch
@@ -38,6 +39,10 @@
self.plot_settings['norm'] = ImageNormalize(
stretch=source_stretch(self.meta, LinearStretch()), clip=False)
+ if 'CROTA' in self.meta and 'CROTA2' not in self.meta:
+ log.debug("Renaming 'CROTA' to 'CROTA2'")
+ self.meta['CROTA2'] = self.meta.pop('CROTA')
+
@property
def processing_level(self):
if self.meta.get('level'):
| {"golden_diff": "diff --git a/sunpy/map/sources/solo.py b/sunpy/map/sources/solo.py\n--- a/sunpy/map/sources/solo.py\n+++ b/sunpy/map/sources/solo.py\n@@ -5,6 +5,7 @@\n from astropy.coordinates import CartesianRepresentation\n from astropy.visualization import ImageNormalize, LinearStretch\n \n+from sunpy import log\n from sunpy.coordinates import HeliocentricInertial\n from sunpy.map import GenericMap\n from sunpy.map.sources.source_type import source_stretch\n@@ -38,6 +39,10 @@\n self.plot_settings['norm'] = ImageNormalize(\n stretch=source_stretch(self.meta, LinearStretch()), clip=False)\n \n+ if 'CROTA' in self.meta and 'CROTA2' not in self.meta:\n+ log.debug(\"Renaming 'CROTA' to 'CROTA2'\")\n+ self.meta['CROTA2'] = self.meta.pop('CROTA')\n+\n @property\n def processing_level(self):\n if self.meta.get('level'):\n", "issue": "Fix CROTA keyword in EUI maps\nCurrently EUI maps have a `CROTA` keyword, which by the FITS standard should really be a `CROTA2` keyword. This results in the warning\r\n```python\r\n/home/docs/checkouts/readthedocs.org/user_builds/solar-orbiter-python/envs/latest/lib/python3.8/site-packages/astropy/wcs/wcs.py:482: FITSFixedWarning: CROTA = 2.486914995997215 / [deg] rotation angle\r\nkeyword looks very much like CROTAn but isn't.\r\n wcsprm = _wcs.Wcsprm(header=header_bytes, key=key,\r\n```\r\nIt would be good to\r\n- Check if CROTA is in the header and CROTA2 isn't\r\n- If so, rename the CROTA keyword to CROTA2\n", "before_files": [{"content": "\"\"\"\nSolar Orbiter Map subclass definitions.\n\"\"\"\nimport astropy.units as u\nfrom astropy.coordinates import CartesianRepresentation\nfrom astropy.visualization import ImageNormalize, LinearStretch\n\nfrom sunpy.coordinates import HeliocentricInertial\nfrom sunpy.map import GenericMap\nfrom sunpy.map.sources.source_type import source_stretch\nfrom sunpy.time import parse_time\n\n__all__ = ['EUIMap']\n\n\nclass EUIMap(GenericMap):\n \"\"\"\n EUI Image Map\n\n The Extreme Ultraviolet Imager (EUI) is a remote sensing instrument onboard the\n Solar Orbiter (SolO) spacecraft. EUI has three telescopes that image the Sun in\n Lyman-alpha (1216 \u00c5) and the EUV (174 \u00c5 and 304 \u00c5). The three telescopes are the\n Full Sun Imager (FSI) and two High Resolution Imagers (HRI). The FSI images the\n whole Sun in both 174 \u00c5 and 304 \u00c5. 
The EUV and Lyman-alpha HRI telescopes image a\n 1000\"-by-1000\" patch in 174 \u00c5 and 1216 \u00c5, respectively.\n\n References\n ----------\n * `Solar Orbiter Mission Page <https://sci.esa.int/web/solar-orbiter/>`__\n * `EUI Instrument Page <https://wwwbis.sidc.be/EUI/EUI/EUI/EUI/EUI/>`__\n * `Instrument Paper <https://doi.org/10.1051/0004-6361/201936663>`__\n \"\"\"\n\n def __init__(self, data, header, **kwargs):\n super().__init__(data, header, **kwargs)\n self._nickname = self.detector\n self.plot_settings['cmap'] = self._get_cmap_name()\n self.plot_settings['norm'] = ImageNormalize(\n stretch=source_stretch(self.meta, LinearStretch()), clip=False)\n\n @property\n def processing_level(self):\n if self.meta.get('level'):\n # The level number is prepended by the letter L\n return int(self.meta.get('level')[1:])\n\n @property\n def exposure_time(self):\n return self.meta.get('xposure', 0.0) * self.timeunit\n\n @property\n def date(self):\n t = self.meta.get('date-avg')\n timesys = self.meta.get('timesys')\n return parse_time(t, scale=timesys.lower())\n\n @property\n def _supported_observer_coordinates(self):\n return [(('hcix_obs', 'hciy_obs', 'hciz_obs'),\n {'x': self.meta.get('hcix_obs'),\n 'y': self.meta.get('hciy_obs'),\n 'z': self.meta.get('hciz_obs'),\n 'unit': u.m,\n 'representation_type': CartesianRepresentation,\n 'frame': HeliocentricInertial})] + super()._supported_observer_coordinates\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an EUI image\"\"\"\n is_solo = 'solar orbiter' in str(header.get('obsrvtry', '')).lower()\n is_eui = str(header.get('instrume', '')).startswith('EUI')\n return is_solo and is_eui\n", "path": "sunpy/map/sources/solo.py"}]} | 1,622 | 233 |
gh_patches_debug_10058 | rasdani/github-patches | git_diff | docker__docker-py-1972 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Build context (.tar) is not prepared properly
Hey,
This morning I've updated to version `3.1.1`; however, using this version I'm getting a weird error from the docker-engine build:
```
ERROR: Error processing tar file(exit status 1): mkdir /foodir/bardir: no such file or directory
```
and the actual building does not start.
It took me some time to realise this is related to the update I got this morning,
Reverting back to version `3.0.1`, I could build again.
*NOTE*: `/foodir/bardir` is censored due to security policy in my company,
so for the sake of this issue, let's assume this is the context:
- Dockerfile
- foodir
- bardir
- file
Also, the path in the error did start with `/`, so I kept it there.
</issue>
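For context, a minimal reproduction sketch of the kind of build call that hits the error above; the socket path, tag, and context layout are assumptions, and exact output handling varies between docker SDK releases:

```python
# Hypothetical reproduction: assumes a local daemon and a context directory
# shaped like the issue (Dockerfile, foodir/bardir/file).
import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')
# The context is tarred client-side (docker/utils/build.py) before upload; if a
# directory is dropped from that archive, the daemon fails with the mkdir error.
for chunk in client.build(path='.', tag='context-test', decode=True):
    print(chunk)
```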
<code>
[start of docker/utils/build.py]
1 import os
2 import re
3
4 from ..constants import IS_WINDOWS_PLATFORM
5 from fnmatch import fnmatch
6 from itertools import chain
7 from .utils import create_archive
8
9
10 def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):
11 root = os.path.abspath(path)
12 exclude = exclude or []
13 return create_archive(
14 files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile)),
15 root=root, fileobj=fileobj, gzip=gzip
16 )
17
18
19 _SEP = re.compile('/|\\\\') if IS_WINDOWS_PLATFORM else re.compile('/')
20
21
22 def exclude_paths(root, patterns, dockerfile=None):
23 """
24 Given a root directory path and a list of .dockerignore patterns, return
25 an iterator of all paths (both regular files and directories) in the root
26 directory that do *not* match any of the patterns.
27
28 All paths returned are relative to the root.
29 """
30
31 if dockerfile is None:
32 dockerfile = 'Dockerfile'
33
34 def split_path(p):
35 return [pt for pt in re.split(_SEP, p) if pt and pt != '.']
36
37 def normalize(p):
38 # Leading and trailing slashes are not relevant. Yes,
39 # "foo.py/" must exclude the "foo.py" regular file. "."
40 # components are not relevant either, even if the whole
41 # pattern is only ".", as the Docker reference states: "For
42 # historical reasons, the pattern . is ignored."
43 # ".." component must be cleared with the potential previous
44 # component, regardless of whether it exists: "A preprocessing
45 # step [...] eliminates . and .. elements using Go's
46 # filepath.".
47 i = 0
48 split = split_path(p)
49 while i < len(split):
50 if split[i] == '..':
51 del split[i]
52 if i > 0:
53 del split[i - 1]
54 i -= 1
55 else:
56 i += 1
57 return split
58
59 patterns = (
60 (True, normalize(p[1:]))
61 if p.startswith('!') else
62 (False, normalize(p))
63 for p in patterns)
64 patterns = list(reversed(list(chain(
65 # Exclude empty patterns such as "." or the empty string.
66 filter(lambda p: p[1], patterns),
67 # Always include the Dockerfile and .dockerignore
68 [(True, split_path(dockerfile)), (True, ['.dockerignore'])]))))
69 return set(walk(root, patterns))
70
71
72 def walk(root, patterns, default=True):
73 """
74 A collection of file lying below root that should be included according to
75 patterns.
76 """
77
78 def match(p):
79 if p[1][0] == '**':
80 rec = (p[0], p[1][1:])
81 return [p] + (match(rec) if rec[1] else [rec])
82 elif fnmatch(f, p[1][0]):
83 return [(p[0], p[1][1:])]
84 else:
85 return []
86
87 for f in os.listdir(root):
88 cur = os.path.join(root, f)
89 # The patterns if recursing in that directory.
90 sub = list(chain(*(match(p) for p in patterns)))
91 # Whether this file is explicitely included / excluded.
92 hit = next((p[0] for p in sub if not p[1]), None)
93 # Whether this file is implicitely included / excluded.
94 matched = default if hit is None else hit
95 sub = list(filter(lambda p: p[1], sub))
96 if os.path.isdir(cur):
97 # Entirely skip directories if there are no chance any subfile will
98 # be included.
99 if all(not p[0] for p in sub) and not matched:
100 continue
101 # I think this would greatly speed up dockerignore handling by not
102 # recursing into directories we are sure would be entirely
103 # included, and only yielding the directory itself, which will be
104 # recursively archived anyway. However the current unit test expect
105 # the full list of subfiles and I'm not 100% sure it would make no
106 # difference yet.
107 # if all(p[0] for p in sub) and matched:
108 # yield f
109 # continue
110 children = False
111 for r in (os.path.join(f, p) for p in walk(cur, sub, matched)):
112 yield r
113 children = True
114 # The current unit tests expect directories only under those
115 # conditions. It might be simplifiable though.
116 if (not sub or not children) and hit or hit is None and default:
117 yield f
118 elif matched:
119 yield f
120
[end of docker/utils/build.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docker/utils/build.py b/docker/utils/build.py
--- a/docker/utils/build.py
+++ b/docker/utils/build.py
@@ -93,7 +93,7 @@
# Whether this file is implicitely included / excluded.
matched = default if hit is None else hit
sub = list(filter(lambda p: p[1], sub))
- if os.path.isdir(cur):
+ if os.path.isdir(cur) and not os.path.islink(cur):
# Entirely skip directories if there are no chance any subfile will
# be included.
if all(not p[0] for p in sub) and not matched:
| {"golden_diff": "diff --git a/docker/utils/build.py b/docker/utils/build.py\n--- a/docker/utils/build.py\n+++ b/docker/utils/build.py\n@@ -93,7 +93,7 @@\n # Whether this file is implicitely included / excluded.\n matched = default if hit is None else hit\n sub = list(filter(lambda p: p[1], sub))\n- if os.path.isdir(cur):\n+ if os.path.isdir(cur) and not os.path.islink(cur):\n # Entirely skip directories if there are no chance any subfile will\n # be included.\n if all(not p[0] for p in sub) and not matched:\n", "issue": "Build context (.tar) is not prepared properly\nHey,\r\n\r\nThis morning I've updated to version `3.1.1` however, using this version i'm getting wired error from docker-engine build:\r\n```\r\nERROR: Error processing tar file(exit status 1): mkdir /foodir/bardir: no such file or directory\r\n```\r\nand the actual building does not start.\r\ntook me some time to realise this is related to update i got this morning, \r\nReverting back to version `3.0.1`, I could build again.\r\n\r\n*NOTE*: `/foodir/bardir` is censored due to security policy in my company,\r\nso for the sake of this issue, lets assume this is the context:\r\n- Dockerfile\r\n- foodir\r\n - bardir\r\n - file\r\n\r\nAlso, path in error did start with `/` so i kept it there.\r\n\n", "before_files": [{"content": "import os\nimport re\n\nfrom ..constants import IS_WINDOWS_PLATFORM\nfrom fnmatch import fnmatch\nfrom itertools import chain\nfrom .utils import create_archive\n\n\ndef tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):\n root = os.path.abspath(path)\n exclude = exclude or []\n return create_archive(\n files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile)),\n root=root, fileobj=fileobj, gzip=gzip\n )\n\n\n_SEP = re.compile('/|\\\\\\\\') if IS_WINDOWS_PLATFORM else re.compile('/')\n\n\ndef exclude_paths(root, patterns, dockerfile=None):\n \"\"\"\n Given a root directory path and a list of .dockerignore patterns, return\n an iterator of all paths (both regular files and directories) in the root\n directory that do *not* match any of the patterns.\n\n All paths returned are relative to the root.\n \"\"\"\n\n if dockerfile is None:\n dockerfile = 'Dockerfile'\n\n def split_path(p):\n return [pt for pt in re.split(_SEP, p) if pt and pt != '.']\n\n def normalize(p):\n # Leading and trailing slashes are not relevant. Yes,\n # \"foo.py/\" must exclude the \"foo.py\" regular file. \".\"\n # components are not relevant either, even if the whole\n # pattern is only \".\", as the Docker reference states: \"For\n # historical reasons, the pattern . is ignored.\"\n # \"..\" component must be cleared with the potential previous\n # component, regardless of whether it exists: \"A preprocessing\n # step [...] eliminates . and .. 
elements using Go's\n # filepath.\".\n i = 0\n split = split_path(p)\n while i < len(split):\n if split[i] == '..':\n del split[i]\n if i > 0:\n del split[i - 1]\n i -= 1\n else:\n i += 1\n return split\n\n patterns = (\n (True, normalize(p[1:]))\n if p.startswith('!') else\n (False, normalize(p))\n for p in patterns)\n patterns = list(reversed(list(chain(\n # Exclude empty patterns such as \".\" or the empty string.\n filter(lambda p: p[1], patterns),\n # Always include the Dockerfile and .dockerignore\n [(True, split_path(dockerfile)), (True, ['.dockerignore'])]))))\n return set(walk(root, patterns))\n\n\ndef walk(root, patterns, default=True):\n \"\"\"\n A collection of file lying below root that should be included according to\n patterns.\n \"\"\"\n\n def match(p):\n if p[1][0] == '**':\n rec = (p[0], p[1][1:])\n return [p] + (match(rec) if rec[1] else [rec])\n elif fnmatch(f, p[1][0]):\n return [(p[0], p[1][1:])]\n else:\n return []\n\n for f in os.listdir(root):\n cur = os.path.join(root, f)\n # The patterns if recursing in that directory.\n sub = list(chain(*(match(p) for p in patterns)))\n # Whether this file is explicitely included / excluded.\n hit = next((p[0] for p in sub if not p[1]), None)\n # Whether this file is implicitely included / excluded.\n matched = default if hit is None else hit\n sub = list(filter(lambda p: p[1], sub))\n if os.path.isdir(cur):\n # Entirely skip directories if there are no chance any subfile will\n # be included.\n if all(not p[0] for p in sub) and not matched:\n continue\n # I think this would greatly speed up dockerignore handling by not\n # recursing into directories we are sure would be entirely\n # included, and only yielding the directory itself, which will be\n # recursively archived anyway. However the current unit test expect\n # the full list of subfiles and I'm not 100% sure it would make no\n # difference yet.\n # if all(p[0] for p in sub) and matched:\n # yield f\n # continue\n children = False\n for r in (os.path.join(f, p) for p in walk(cur, sub, matched)):\n yield r\n children = True\n # The current unit tests expect directories only under those\n # conditions. It might be simplifiable though.\n if (not sub or not children) and hit or hit is None and default:\n yield f\n elif matched:\n yield f\n", "path": "docker/utils/build.py"}]} | 1,988 | 139 |
gh_patches_debug_742 | rasdani/github-patches | git_diff | streamlink__streamlink-3952 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add lxml dependency
### Checklist
- [X] This is a feature request and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin requests](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22feature+request%22)
### Description
Streamlink should finally switch to a proper HTML/XML parser for extracting data instead of using cheap regex workarounds which don't work properly. I've already commented on this issue last year:
https://github.com/streamlink/streamlink/issues/3241#issuecomment-706486239
The reason why I'm suggesting this again right now is that I was trying to fix the deutschewelle plugin (https://dw.com) yesterday and ran into issues with the `itertags` utility method, which is based on simple regexes for iterating HTML nodes and their attributes+body. `itertags` for example does not work with nested nodes, which makes adding ridiculous custom regexes necessary. Just take a look at this madness:
https://github.com/streamlink/streamlink/blob/3668770d608f0fab54d40a46acd6720a97f63775/src/streamlink/plugins/deutschewelle.py#L18-L29
With `lxml` (https://lxml.de/), HTML page contents can be parsed and the data extracted via XPath queries and/or the respective API methods. The methods are similar to python's native `xml.etree.ElementTree`, which itself is considered too slow and unsafe in certain cases. I am by no means an expert regarding python's standard library though, so if someone has better insight here, please share. In regards to packaging, this lib is available on basically every packaging system and adding it as a dependency here only has benefits.
I'd suggest that we add `lxml` as a dependency now and start using it for extracting data from HTML documents. The validation schema methods could be improved for this as well. There's also the `parse_xml` utility method, which is currently based on the native module.
Comments?
</issue>
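A short sketch of the lxml-based extraction style argued for above; the URL and XPath expression are placeholders, not real plugin code:

```python
# Illustrative only: parse a page once, then query it with XPath.
import requests
from lxml import html

doc = html.fromstring(requests.get("https://example.com").text)
# XPath copes with nested nodes directly, which regex-based itertags cannot.
for href in doc.xpath('//div[@class="player"]//a/@href'):
    print(href)
```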
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import codecs
3 from os import environ, path
4 from sys import argv, path as sys_path
5
6 from setuptools import find_packages, setup
7
8 import versioneer
9
10
11 data_files = []
12 deps = [
13 "requests>=2.26.0,<3.0",
14 "isodate",
15 "websocket-client>=0.58.0",
16 # Support for SOCKS proxies
17 "PySocks!=1.5.7,>=1.5.6",
18 ]
19
20 # for encrypted streams
21 if environ.get("STREAMLINK_USE_PYCRYPTO"):
22 deps.append("pycrypto")
23 else:
24 # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6
25 deps.append("pycryptodome>=3.4.3,<4")
26
27 # for localization
28 if environ.get("STREAMLINK_USE_PYCOUNTRY"):
29 deps.append("pycountry")
30 else:
31 deps.append("iso-639")
32 deps.append("iso3166")
33
34 # When we build an egg for the Win32 bootstrap we don"t want dependency
35 # information built into it.
36 if environ.get("NO_DEPS"):
37 deps = []
38
39 this_directory = path.abspath(path.dirname(__file__))
40 srcdir = path.join(this_directory, "src/")
41 sys_path.insert(0, srcdir)
42
43 with codecs.open(path.join(this_directory, "README.md"), 'r', "utf8") as f:
44 long_description = f.read()
45
46
47 def is_wheel_for_windows():
48 if "bdist_wheel" in argv:
49 names = ["win32", "win-amd64", "cygwin"]
50 length = len(argv)
51 for pos in range(argv.index("bdist_wheel") + 1, length):
52 if argv[pos] == "--plat-name" and pos + 1 < length:
53 return argv[pos + 1] in names
54 elif argv[pos][:12] == "--plat-name=":
55 return argv[pos][12:] in names
56 return False
57
58
59 entry_points = {
60 "console_scripts": ["streamlink=streamlink_cli.main:main"]
61 }
62
63 if is_wheel_for_windows():
64 entry_points["gui_scripts"] = ["streamlinkw=streamlink_cli.main:main"]
65
66
67 additional_files = [
68 ("share/man/man1", ["docs/_build/man/streamlink.1"])
69 ]
70
71 for destdir, srcfiles in additional_files:
72 files = []
73 for srcfile in srcfiles:
74 if path.exists(srcfile):
75 files.append(srcfile)
76 if files:
77 data_files.append((destdir, files))
78
79
80 setup(name="streamlink",
81 version=versioneer.get_version(),
82 cmdclass=versioneer.get_cmdclass(),
83 description="Streamlink is a command-line utility that extracts streams "
84 "from various services and pipes them into a video player of "
85 "choice.",
86 long_description=long_description,
87 long_description_content_type="text/markdown",
88 url="https://github.com/streamlink/streamlink",
89 project_urls={
90 "Documentation": "https://streamlink.github.io/",
91 "Tracker": "https://github.com/streamlink/streamlink/issues",
92 "Source": "https://github.com/streamlink/streamlink",
93 "Funding": "https://opencollective.com/streamlink"
94 },
95 author="Streamlink",
96 # temp until we have a mailing list / global email
97 author_email="[email protected]",
98 license="Simplified BSD",
99 packages=find_packages("src"),
100 package_dir={"": "src"},
101 package_data={"streamlink.plugins": [".removed"]},
102 entry_points=entry_points,
103 data_files=data_files,
104 install_requires=deps,
105 test_suite="tests",
106 python_requires=">=3.6, <4",
107 classifiers=["Development Status :: 5 - Production/Stable",
108 "License :: OSI Approved :: BSD License",
109 "Environment :: Console",
110 "Intended Audience :: End Users/Desktop",
111 "Operating System :: POSIX",
112 "Operating System :: Microsoft :: Windows",
113 "Operating System :: MacOS",
114 "Programming Language :: Python :: 3",
115 "Programming Language :: Python :: 3 :: Only",
116 "Programming Language :: Python :: 3.6",
117 "Programming Language :: Python :: 3.7",
118 "Programming Language :: Python :: 3.8",
119 "Programming Language :: Python :: 3.9",
120 "Topic :: Internet :: WWW/HTTP",
121 "Topic :: Multimedia :: Sound/Audio",
122 "Topic :: Multimedia :: Video",
123 "Topic :: Utilities"])
124
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -12,6 +12,7 @@
deps = [
"requests>=2.26.0,<3.0",
"isodate",
+ "lxml>=4.6.3",
"websocket-client>=0.58.0",
# Support for SOCKS proxies
"PySocks!=1.5.7,>=1.5.6",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -12,6 +12,7 @@\n deps = [\n \"requests>=2.26.0,<3.0\",\n \"isodate\",\n+ \"lxml>=4.6.3\",\n \"websocket-client>=0.58.0\",\n # Support for SOCKS proxies\n \"PySocks!=1.5.7,>=1.5.6\",\n", "issue": "Add lxml dependency\n### Checklist\n\n- [X] This is a feature request and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin requests](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22feature+request%22)\n\n### Description\n\nStreamlink should finally switch to a proper HTML/XML parser for extracting data instead of using cheap regex workarounds which don't work properly. I've already commented on this issue last year:\r\nhttps://github.com/streamlink/streamlink/issues/3241#issuecomment-706486239\r\n\r\nThe reason why I'm suggesting this again right now is that I was trying to fix the deutschewelle plugin (https://dw.com) yesterday and ran into issues with the `itertags` utility method, which is based on simple regexes for iterating HTML nodes and their attributes+body. `itertags` for example does not work with nested nodes, which makes adding ridiculous custom regexes necessary. Just take a look at this madness:\r\nhttps://github.com/streamlink/streamlink/blob/3668770d608f0fab54d40a46acd6720a97f63775/src/streamlink/plugins/deutschewelle.py#L18-L29\r\n\r\nWith `lxml` (https://lxml.de/), HTML page contents can be parsed and the data extracted via XPath queries and/or the respective API methods. The methods are similar to python's native `xml.etree.ElementTree`, which itself is considered too slow and unsafe in certain cases. I am by no means an expert regarding python's standard library though, so if someone has better insight here, please share. In regards to packaging, this lib is available on basically every packaging system and adding it as a dependency here only has benefits.\r\n\r\nI'd suggest that we add `lxml` as a dependency now and start using it for extracting data from HTML documents. The validation schema methods could be improved for this as well. 
There's also the `parse_xml` utility method, which is currently based on the native module.\r\n\r\nComments?\n", "before_files": [{"content": "#!/usr/bin/env python\nimport codecs\nfrom os import environ, path\nfrom sys import argv, path as sys_path\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\n\ndata_files = []\ndeps = [\n \"requests>=2.26.0,<3.0\",\n \"isodate\",\n \"websocket-client>=0.58.0\",\n # Support for SOCKS proxies\n \"PySocks!=1.5.7,>=1.5.6\",\n]\n\n# for encrypted streams\nif environ.get(\"STREAMLINK_USE_PYCRYPTO\"):\n deps.append(\"pycrypto\")\nelse:\n # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6\n deps.append(\"pycryptodome>=3.4.3,<4\")\n\n# for localization\nif environ.get(\"STREAMLINK_USE_PYCOUNTRY\"):\n deps.append(\"pycountry\")\nelse:\n deps.append(\"iso-639\")\n deps.append(\"iso3166\")\n\n# When we build an egg for the Win32 bootstrap we don\"t want dependency\n# information built into it.\nif environ.get(\"NO_DEPS\"):\n deps = []\n\nthis_directory = path.abspath(path.dirname(__file__))\nsrcdir = path.join(this_directory, \"src/\")\nsys_path.insert(0, srcdir)\n\nwith codecs.open(path.join(this_directory, \"README.md\"), 'r', \"utf8\") as f:\n long_description = f.read()\n\n\ndef is_wheel_for_windows():\n if \"bdist_wheel\" in argv:\n names = [\"win32\", \"win-amd64\", \"cygwin\"]\n length = len(argv)\n for pos in range(argv.index(\"bdist_wheel\") + 1, length):\n if argv[pos] == \"--plat-name\" and pos + 1 < length:\n return argv[pos + 1] in names\n elif argv[pos][:12] == \"--plat-name=\":\n return argv[pos][12:] in names\n return False\n\n\nentry_points = {\n \"console_scripts\": [\"streamlink=streamlink_cli.main:main\"]\n}\n\nif is_wheel_for_windows():\n entry_points[\"gui_scripts\"] = [\"streamlinkw=streamlink_cli.main:main\"]\n\n\nadditional_files = [\n (\"share/man/man1\", [\"docs/_build/man/streamlink.1\"])\n]\n\nfor destdir, srcfiles in additional_files:\n files = []\n for srcfile in srcfiles:\n if path.exists(srcfile):\n files.append(srcfile)\n if files:\n data_files.append((destdir, files))\n\n\nsetup(name=\"streamlink\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description=\"Streamlink is a command-line utility that extracts streams \"\n \"from various services and pipes them into a video player of \"\n \"choice.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/streamlink/streamlink\",\n project_urls={\n \"Documentation\": \"https://streamlink.github.io/\",\n \"Tracker\": \"https://github.com/streamlink/streamlink/issues\",\n \"Source\": \"https://github.com/streamlink/streamlink\",\n \"Funding\": \"https://opencollective.com/streamlink\"\n },\n author=\"Streamlink\",\n # temp until we have a mailing list / global email\n author_email=\"[email protected]\",\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n package_data={\"streamlink.plugins\": [\".removed\"]},\n entry_points=entry_points,\n data_files=data_files,\n install_requires=deps,\n test_suite=\"tests\",\n python_requires=\">=3.6, <4\",\n classifiers=[\"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: BSD License\",\n \"Environment :: Console\",\n \"Intended Audience :: End Users/Desktop\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS\",\n \"Programming Language :: Python :: 3\",\n 
\"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Multimedia :: Video\",\n \"Topic :: Utilities\"])\n", "path": "setup.py"}]} | 2,288 | 106 |
gh_patches_debug_5701 | rasdani/github-patches | git_diff | getpelican__pelican-3094 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
A dead link on PyPI for the contributions and feedback
I just stumbled upon [Pelican's page on PyPI](https://pypi.org/project/pelican/) and found that the [link for the contributions and feedback](https://pypi.org/project/pelican/CONTRIBUTING.rst) is dead. Perhaps it needs to be updated?
</issue>
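One possible remedy, sketched here under the assumption that the relative link lives in README.rst and should point at the hosted docs instead:

```python
# Hypothetical sketch: rewrite the relative CONTRIBUTING link into an absolute
# URL before passing the README to setuptools, so it renders correctly on PyPI.
README = open('README.rst', encoding='utf-8').read()
README = README.replace(
    '<CONTRIBUTING.rst>',
    '<https://docs.getpelican.com/en/latest/contribute.html>',
)
```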
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 from os import walk
4 from os.path import join, relpath
5
6 from setuptools import find_packages, setup
7
8
9 version = "4.8.0"
10
11 requires = ['feedgenerator >= 1.9', 'jinja2 >= 2.7', 'pygments',
12 'docutils>=0.15', 'pytz >= 0a', 'blinker', 'unidecode',
13 'python-dateutil', 'rich']
14
15 entry_points = {
16 'console_scripts': [
17 'pelican = pelican.__main__:main',
18 'pelican-import = pelican.tools.pelican_import:main',
19 'pelican-quickstart = pelican.tools.pelican_quickstart:main',
20 'pelican-themes = pelican.tools.pelican_themes:main',
21 'pelican-plugins = pelican.plugins._utils:list_plugins'
22 ]
23 }
24
25 README = open('README.rst', encoding='utf-8').read()
26 CHANGELOG = open('docs/changelog.rst', encoding='utf-8').read()
27
28 description = '\n'.join([README, CHANGELOG])
29
30 setup(
31 name='pelican',
32 version=version,
33 url='https://getpelican.com/',
34 author='Justin Mayer',
35 author_email='[email protected]',
36 description="Static site generator supporting reStructuredText and "
37 "Markdown source content.",
38 project_urls={
39 'Documentation': 'https://docs.getpelican.com/',
40 'Funding': 'https://donate.getpelican.com/',
41 'Source': 'https://github.com/getpelican/pelican',
42 'Tracker': 'https://github.com/getpelican/pelican/issues',
43 },
44 keywords='static web site generator SSG reStructuredText Markdown',
45 license='AGPLv3',
46 long_description=description,
47 long_description_content_type='text/x-rst',
48 packages=find_packages(),
49 include_package_data=True, # includes all in MANIFEST.in if in package
50 # NOTE : This will collect any files that happen to be in the themes
51 # directory, even though they may not be checked into version control.
52 package_data={ # pelican/themes is not a package, so include manually
53 'pelican': [relpath(join(root, name), 'pelican')
54 for root, _, names in walk(join('pelican', 'themes'))
55 for name in names],
56 },
57 install_requires=requires,
58 extras_require={
59 'Markdown': ['markdown~=3.1.1']
60 },
61 entry_points=entry_points,
62 classifiers=[
63 'Development Status :: 5 - Production/Stable',
64 'Environment :: Console',
65 'Framework :: Pelican',
66 'License :: OSI Approved :: GNU Affero General Public License v3',
67 'Operating System :: OS Independent',
68 'Programming Language :: Python :: 3',
69 'Programming Language :: Python :: 3.7',
70 'Programming Language :: Python :: 3.8',
71 'Programming Language :: Python :: 3.9',
72 'Programming Language :: Python :: 3.10',
73 'Programming Language :: Python :: Implementation :: CPython',
74 'Topic :: Internet :: WWW/HTTP',
75 'Topic :: Software Development :: Libraries :: Python Modules',
76 ],
77 test_suite='pelican.tests',
78 )
79
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -25,6 +25,13 @@
README = open('README.rst', encoding='utf-8').read()
CHANGELOG = open('docs/changelog.rst', encoding='utf-8').read()
+# Relative links in the README must be converted to absolute URL's
+# so that they render correctly on PyPI.
+README = README.replace(
+ "<CONTRIBUTING.rst>",
+ "<https://docs.getpelican.com/en/latest/contribute.html>",
+)
+
description = '\n'.join([README, CHANGELOG])
setup(
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -25,6 +25,13 @@\n README = open('README.rst', encoding='utf-8').read()\n CHANGELOG = open('docs/changelog.rst', encoding='utf-8').read()\n \n+# Relative links in the README must be converted to absolute URL's\n+# so that they render correctly on PyPI.\n+README = README.replace(\n+ \"<CONTRIBUTING.rst>\",\n+ \"<https://docs.getpelican.com/en/latest/contribute.html>\",\n+)\n+\n description = '\\n'.join([README, CHANGELOG])\n \n setup(\n", "issue": "A dead link on PyPI for the contributions and feedback\nI just stumbled upon [Pelican's page in PyPI](https://pypi.org/project/pelican/) and found that the l[ink for the contributions and feedback](https://pypi.org/project/pelican/CONTRIBUTING.rst) is dead. Perhaps, it needs to be updated?\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom os import walk\nfrom os.path import join, relpath\n\nfrom setuptools import find_packages, setup\n\n\nversion = \"4.8.0\"\n\nrequires = ['feedgenerator >= 1.9', 'jinja2 >= 2.7', 'pygments',\n 'docutils>=0.15', 'pytz >= 0a', 'blinker', 'unidecode',\n 'python-dateutil', 'rich']\n\nentry_points = {\n 'console_scripts': [\n 'pelican = pelican.__main__:main',\n 'pelican-import = pelican.tools.pelican_import:main',\n 'pelican-quickstart = pelican.tools.pelican_quickstart:main',\n 'pelican-themes = pelican.tools.pelican_themes:main',\n 'pelican-plugins = pelican.plugins._utils:list_plugins'\n ]\n}\n\nREADME = open('README.rst', encoding='utf-8').read()\nCHANGELOG = open('docs/changelog.rst', encoding='utf-8').read()\n\ndescription = '\\n'.join([README, CHANGELOG])\n\nsetup(\n name='pelican',\n version=version,\n url='https://getpelican.com/',\n author='Justin Mayer',\n author_email='[email protected]',\n description=\"Static site generator supporting reStructuredText and \"\n \"Markdown source content.\",\n project_urls={\n 'Documentation': 'https://docs.getpelican.com/',\n 'Funding': 'https://donate.getpelican.com/',\n 'Source': 'https://github.com/getpelican/pelican',\n 'Tracker': 'https://github.com/getpelican/pelican/issues',\n },\n keywords='static web site generator SSG reStructuredText Markdown',\n license='AGPLv3',\n long_description=description,\n long_description_content_type='text/x-rst',\n packages=find_packages(),\n include_package_data=True, # includes all in MANIFEST.in if in package\n # NOTE : This will collect any files that happen to be in the themes\n # directory, even though they may not be checked into version control.\n package_data={ # pelican/themes is not a package, so include manually\n 'pelican': [relpath(join(root, name), 'pelican')\n for root, _, names in walk(join('pelican', 'themes'))\n for name in names],\n },\n install_requires=requires,\n extras_require={\n 'Markdown': ['markdown~=3.1.1']\n },\n entry_points=entry_points,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Framework :: Pelican',\n 'License :: OSI Approved :: GNU Affero General Public License v3',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n test_suite='pelican.tests',\n)\n", "path": "setup.py"}]} | 
1,467 | 143 |
gh_patches_debug_29680 | rasdani/github-patches | git_diff | ARM-DOE__ACT-664 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
act.utils.decode_present_weather classification tables
* ACT version: 1.4.2
* Python version: 3.9.16
* Operating System: Windows 10
### Description
I was working on decoding the FD70 present weather codes for the ATMOS FD70. It appears the FD70 uses both the WMO 4680 and 4677 tables in its numeric code. For its METAR code it uses WMO 4678.
When the code encounters a value of 88 (snow pellets), which the FD70 adopted from the 4677 table, it errors and stops. A suggestion would possibly be to add some of the WMO 4677 entries to fill number gaps where applicable.
### What I Did
```
input:
act.utils.decode_present_weather(ds,variable='present_wx0')
output:
File D:\anaconda3\lib\site-packages\act\utils\inst_utils.py:133 in <listcomp>
wx_type = [weather[d] for d in data.values]
KeyError: 88
```
</issue>
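A hedged sketch of the fallback the report suggests: supplement the WMO 4680 hash with the 4677 entry the FD70 emits, and avoid a hard KeyError for any remaining gaps (variable names follow the listing below; the 'Unknown' fallback text is an assumption):

```python
# Illustrative only, not the actual ACT patch.
weather[88] = ('Shower(s) of snow pellets or small hail, with or without '
               'rain or rain and snow mixed, moderate or heavy')
wx_type = [weather.get(d, 'Unknown present weather code') for d in data.values]
```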
<code>
[start of act/utils/inst_utils.py]
1 """
2 Functions containing utilities for instruments.
3
4 """
5
6
7 def decode_present_weather(ds, variable=None, decoded_name=None):
8 """
9 This function is to decode codes reported from automatic weather stations suchas the PWD22.
10 This is based on WMO Table 4680.
11
12 Parameters
13 ----------
14 ds : xarray.Dataset
15 ACT or Xarray dataset from which to convert codes
16 variable : string
17 Variable to decode
18 decoded_name : string
19 New variable name to store updated labels
20
21 Returns
22 -------
23 ds : xarray.Dataset
24 Returns dataset with new decoded data
25
26 References
27 ----------
28 WMO Manual on Code Volume I.1 A-360.
29 https://library.wmo.int/doc_num.php?explnum_id=10235
30
31 """
32
33 # Check to ensure that a variable name is passed
34 if variable is None:
35 raise ValueError('You must specify a variable')
36
37 if variable not in ds:
38 raise ValueError('Variable not in the dataset')
39
40 # Define the weather hash
41 weather = {
42 0: 'No significant weather observed',
43 1: 'Clouds generally dissolving or becoming less developed during the past hour',
44 2: 'State of the sky on the whole unchanged during the past hour',
45 3: 'Clouds generally forming or developing during the past hour',
46 4: 'Haze or smoke, or dust in suspension in the air, visibility >= 1 km',
47 5: 'Haze or smoke, or dust in suspension in the air, visibility < 1 km',
48 10: 'Mist',
49 11: 'Diamond dust',
50 12: 'Distant lightning',
51 18: 'Squalls',
52 20: 'Fog',
53 21: 'Precipitation',
54 22: 'Drizzle (not freezing) or snow grains',
55 23: 'Rain (not freezing)',
56 24: 'Snow',
57 25: 'Freezing drizzle or freezing rain',
58 26: 'Thunderstorm (with or without precipitation)',
59 27: 'Blowing or drifting snow or sand',
60 28: 'Blowing or drifting snow or sand, visibility >= 1 km',
61 29: 'Blowing or drifting snow or sand, visibility < 1 km',
62 30: 'Fog',
63 31: 'Fog or ice fog in patches',
64 32: 'Fog or ice fog, has become thinner during the past hour',
65 33: 'Fog or ice fog, no appreciable change during the past hour',
66 34: 'Fog or ice fog, has begun or become thicker during the past hour',
67 35: 'Fog, depositing rime',
68 40: 'Precipitation',
69 41: 'Precipitation, slight or moderate',
70 42: 'Precipitation, heavy',
71 43: 'Liquid precipitation, slight or moderate',
72 44: 'Liquid precipitation, heavy',
73 45: 'Solid precipitation, slight or moderate',
74 46: 'Solid precipitation, heavy',
75 47: 'Freezing precipitation, slight or moderate',
76 48: 'Freezing precipitation, heavy',
77 50: 'Drizzle',
78 51: 'Drizzle, not freezing, slight',
79 52: 'Drizzle, not freezing, moderate',
80 53: 'Drizzle, not freezing, heavy',
81 54: 'Drizzle, freezing, slight',
82 55: 'Drizzle, freezing, moderate',
83 56: 'Drizzle, freezing, heavy',
84 57: 'Drizzle and rain, slight',
85 58: 'Drizzle and rain, moderate or heavy',
86 60: 'Rain',
87 61: 'Rain, not freezing, slight',
88 62: 'Rain, not freezing, moderate',
89 63: 'Rain, not freezing, heavy',
90 64: 'Rain, freezing, slight',
91 65: 'Rain, freezing, moderate',
92 66: 'Rain, freezing, heavy',
93 67: 'Rain (or drizzle) and snow, slight',
94 68: 'Rain (or drizzle) and snow, moderate or heavy',
95 70: 'Snow',
96 71: 'Snow, light',
97 72: 'Snow, moderate',
98 73: 'Snow, heavy',
99 74: 'Ice pellets, slight',
100 75: 'Ice pellets, moderate',
101 76: 'Ice pellets, heavy',
102 77: 'Snow grains',
103 78: 'Ice crystals',
104 80: 'Shower(s) or Intermittent Precipitation',
105 81: 'Rain shower(s) or intermittent rain, slight',
106 82: 'Rain shower(s) or intermittent rain, moderate',
107 83: 'Rain shower(s) or intermittent rain, heavy',
108 84: 'Rain shower(s) or intermittent rain, violent',
109 85: 'Snow shower(s) or intermittent snow, slight',
110 86: 'Snow shower(s) or intermittent snow, moderate',
111 87: 'Snow shower(s) or intermittent snow, heavy',
112 89: 'Hail',
113 90: 'Thunderstorm',
114 91: 'Thunderstorm, slight or moderate, with no precipitation',
115 92: 'Thunderstorm, slight or moderate, with rain showers and/or snow showers',
116 93: 'Thunderstorm, slight or moderate, with hail',
117 94: 'Thunderstorm, heavy, with no precipitation',
118 95: 'Thunderstorm, heavy, with rain showers and/or snow showers',
119 96: 'Thunderstorm, heavy, with hail',
120 99: 'Tornado',
121 -9999: 'Missing',
122 }
123
124 # If a decoded name is not passed, make one
125 if decoded_name is None:
126 decoded_name = variable + '_decoded'
127
128 # Get data and fill nans with -9999
129 data = ds[variable]
130 data = data.fillna(-9999)
131
132 # Get the weather type for each code
133 wx_type = [weather[d] for d in data.values]
134
135 # Massage the data array to set back in the dataset
136 data.values = wx_type
137 if 'long_name' in data.attrs:
138 data.attrs['long_name'] = data.attrs['long_name'] + ' Decoded'
139 else:
140 data.attrs['long_name'] = 'Decoded present weather values'
141 if 'valid_min' in data.attrs:
142 del data.attrs['valid_min']
143 if 'valid_max' in data.attrs:
144 del data.attrs['valid_max']
145
146 ds[decoded_name] = data
147
148 return ds
149
[end of act/utils/inst_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/act/utils/inst_utils.py b/act/utils/inst_utils.py
--- a/act/utils/inst_utils.py
+++ b/act/utils/inst_utils.py
@@ -6,8 +6,8 @@
def decode_present_weather(ds, variable=None, decoded_name=None):
"""
- This function is to decode codes reported from automatic weather stations suchas the PWD22.
- This is based on WMO Table 4680.
+ This function is to decode codes reported from automatic weather stations such as the PWD22.
+ This is based on WMO Table 4680 as well as a supplement table for WMO table 4677.
Parameters
----------
@@ -37,8 +37,8 @@
if variable not in ds:
raise ValueError('Variable not in the dataset')
- # Define the weather hash
- weather = {
+ # Define the weather hash for WMO table 4680.
+ weather_4680 = {
0: 'No significant weather observed',
1: 'Clouds generally dissolving or becoming less developed during the past hour',
2: 'State of the sky on the whole unchanged during the past hour',
@@ -121,6 +121,18 @@
-9999: 'Missing',
}
+ # Define the weather hash for WMO table 4677.
+ weather_4677 = {
+ 88: 'Shower(s) of snow pellets or small hail, with or without rain or rain and snow mixed, moderate or heavy',
+ }
+
+ # Join weather tables
+ weather_combined = dict(weather_4680)
+ weather_combined.update(weather_4677)
+
+ # Sort keys to be in order
+ weather = dict(sorted(weather_combined.items()))
+
# If a decoded name is not passed, make one
if decoded_name is None:
decoded_name = variable + '_decoded'
| {"golden_diff": "diff --git a/act/utils/inst_utils.py b/act/utils/inst_utils.py\n--- a/act/utils/inst_utils.py\n+++ b/act/utils/inst_utils.py\n@@ -6,8 +6,8 @@\n \n def decode_present_weather(ds, variable=None, decoded_name=None):\n \"\"\"\n- This function is to decode codes reported from automatic weather stations suchas the PWD22.\n- This is based on WMO Table 4680.\n+ This function is to decode codes reported from automatic weather stations such as the PWD22.\n+ This is based on WMO Table 4680 as well as a supplement table for WMO table 4677.\n \n Parameters\n ----------\n@@ -37,8 +37,8 @@\n if variable not in ds:\n raise ValueError('Variable not in the dataset')\n \n- # Define the weather hash\n- weather = {\n+ # Define the weather hash for WMO table 4680.\n+ weather_4680 = {\n 0: 'No significant weather observed',\n 1: 'Clouds generally dissolving or becoming less developed during the past hour',\n 2: 'State of the sky on the whole unchanged during the past hour',\n@@ -121,6 +121,18 @@\n -9999: 'Missing',\n }\n \n+ # Define the weather hash for WMO table 4677.\n+ weather_4677 = {\n+ 88: 'Shower(s) of snow pellets or small hail, with or without rain or rain and snow mixed, moderate or heavy',\n+ }\n+\n+ # Join weather tables\n+ weather_combined = dict(weather_4680)\n+ weather_combined.update(weather_4677)\n+\n+ # Sort keys to be in order\n+ weather = dict(sorted(weather_combined.items()))\n+\n # If a decoded name is not passed, make one\n if decoded_name is None:\n decoded_name = variable + '_decoded'\n", "issue": "act.utils.decode_present_weather classification tables\n* ACT version: 1.4.2\r\n* Python version: 3.9.16\r\n* Operating System: Windows 10\r\n\r\n### Description\r\n\r\nI was working on decoding the FD70 present weather codes for the ATMOS FD70. It appears the FD70 uses both the WMO 4680 and 4677 in the numeric code. For it's metar code it uses WMO 4678. \r\n\r\nWhen the code encountered a value of 88, for snow pellets, which it adopted from the 4677 table, it errors and stops. Suggestion would possible be adding in some of the WMO4677 to fill number gaps if applicable. 
\r\n\r\n### What I Did\r\n\r\n```\r\ninput: \r\nact.utils.decode_present_weather(ds,variable='present_wx0')\r\noutput:\r\n\r\n File D:\\anaconda3\\lib\\site-packages\\act\\utils\\inst_utils.py:133 in <listcomp>\r\n wx_type = [weather[d] for d in data.values]\r\n\r\nKeyError: 88\r\n\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nFunctions containing utilities for instruments.\n\n\"\"\"\n\n\ndef decode_present_weather(ds, variable=None, decoded_name=None):\n \"\"\"\n This function is to decode codes reported from automatic weather stations suchas the PWD22.\n This is based on WMO Table 4680.\n\n Parameters\n ----------\n ds : xarray.Dataset\n ACT or Xarray dataset from which to convert codes\n variable : string\n Variable to decode\n decoded_name : string\n New variable name to store updated labels\n\n Returns\n -------\n ds : xarray.Dataset\n Returns dataset with new decoded data\n\n References\n ----------\n WMO Manual on Code Volume I.1 A-360.\n https://library.wmo.int/doc_num.php?explnum_id=10235\n\n \"\"\"\n\n # Check to ensure that a variable name is passed\n if variable is None:\n raise ValueError('You must specify a variable')\n\n if variable not in ds:\n raise ValueError('Variable not in the dataset')\n\n # Define the weather hash\n weather = {\n 0: 'No significant weather observed',\n 1: 'Clouds generally dissolving or becoming less developed during the past hour',\n 2: 'State of the sky on the whole unchanged during the past hour',\n 3: 'Clouds generally forming or developing during the past hour',\n 4: 'Haze or smoke, or dust in suspension in the air, visibility >= 1 km',\n 5: 'Haze or smoke, or dust in suspension in the air, visibility < 1 km',\n 10: 'Mist',\n 11: 'Diamond dust',\n 12: 'Distant lightning',\n 18: 'Squalls',\n 20: 'Fog',\n 21: 'Precipitation',\n 22: 'Drizzle (not freezing) or snow grains',\n 23: 'Rain (not freezing)',\n 24: 'Snow',\n 25: 'Freezing drizzle or freezing rain',\n 26: 'Thunderstorm (with or without precipitation)',\n 27: 'Blowing or drifting snow or sand',\n 28: 'Blowing or drifting snow or sand, visibility >= 1 km',\n 29: 'Blowing or drifting snow or sand, visibility < 1 km',\n 30: 'Fog',\n 31: 'Fog or ice fog in patches',\n 32: 'Fog or ice fog, has become thinner during the past hour',\n 33: 'Fog or ice fog, no appreciable change during the past hour',\n 34: 'Fog or ice fog, has begun or become thicker during the past hour',\n 35: 'Fog, depositing rime',\n 40: 'Precipitation',\n 41: 'Precipitation, slight or moderate',\n 42: 'Precipitation, heavy',\n 43: 'Liquid precipitation, slight or moderate',\n 44: 'Liquid precipitation, heavy',\n 45: 'Solid precipitation, slight or moderate',\n 46: 'Solid precipitation, heavy',\n 47: 'Freezing precipitation, slight or moderate',\n 48: 'Freezing precipitation, heavy',\n 50: 'Drizzle',\n 51: 'Drizzle, not freezing, slight',\n 52: 'Drizzle, not freezing, moderate',\n 53: 'Drizzle, not freezing, heavy',\n 54: 'Drizzle, freezing, slight',\n 55: 'Drizzle, freezing, moderate',\n 56: 'Drizzle, freezing, heavy',\n 57: 'Drizzle and rain, slight',\n 58: 'Drizzle and rain, moderate or heavy',\n 60: 'Rain',\n 61: 'Rain, not freezing, slight',\n 62: 'Rain, not freezing, moderate',\n 63: 'Rain, not freezing, heavy',\n 64: 'Rain, freezing, slight',\n 65: 'Rain, freezing, moderate',\n 66: 'Rain, freezing, heavy',\n 67: 'Rain (or drizzle) and snow, slight',\n 68: 'Rain (or drizzle) and snow, moderate or heavy',\n 70: 'Snow',\n 71: 'Snow, light',\n 72: 'Snow, moderate',\n 73: 'Snow, heavy',\n 74: 'Ice pellets, slight',\n 
75: 'Ice pellets, moderate',\n 76: 'Ice pellets, heavy',\n 77: 'Snow grains',\n 78: 'Ice crystals',\n 80: 'Shower(s) or Intermittent Precipitation',\n 81: 'Rain shower(s) or intermittent rain, slight',\n 82: 'Rain shower(s) or intermittent rain, moderate',\n 83: 'Rain shower(s) or intermittent rain, heavy',\n 84: 'Rain shower(s) or intermittent rain, violent',\n 85: 'Snow shower(s) or intermittent snow, slight',\n 86: 'Snow shower(s) or intermittent snow, moderate',\n 87: 'Snow shower(s) or intermittent snow, heavy',\n 89: 'Hail',\n 90: 'Thunderstorm',\n 91: 'Thunderstorm, slight or moderate, with no precipitation',\n 92: 'Thunderstorm, slight or moderate, with rain showers and/or snow showers',\n 93: 'Thunderstorm, slight or moderate, with hail',\n 94: 'Thunderstorm, heavy, with no precipitation',\n 95: 'Thunderstorm, heavy, with rain showers and/or snow showers',\n 96: 'Thunderstorm, heavy, with hail',\n 99: 'Tornado',\n -9999: 'Missing',\n }\n\n # If a decoded name is not passed, make one\n if decoded_name is None:\n decoded_name = variable + '_decoded'\n\n # Get data and fill nans with -9999\n data = ds[variable]\n data = data.fillna(-9999)\n\n # Get the weather type for each code\n wx_type = [weather[d] for d in data.values]\n\n # Massage the data array to set back in the dataset\n data.values = wx_type\n if 'long_name' in data.attrs:\n data.attrs['long_name'] = data.attrs['long_name'] + ' Decoded'\n else:\n data.attrs['long_name'] = 'Decoded present weather values'\n if 'valid_min' in data.attrs:\n del data.attrs['valid_min']\n if 'valid_max' in data.attrs:\n del data.attrs['valid_max']\n\n ds[decoded_name] = data\n\n return ds\n", "path": "act/utils/inst_utils.py"}]} | 2,656 | 450 |
gh_patches_debug_5379 | rasdani/github-patches | git_diff | cltk__cltk-399 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Compile Poeti d’Italia for CLTK
http://www.mqdq.it/mqdq/poetiditalia/indice_autori_alfa.jsp?scelta=AZ&path=metri_opere
</issue>
<code>
[start of cltk/corpus/latin/corpora.py]
1 """Latin language corpora available for download or loading locally.
2 All remote corpora hosted by github on the cltk organization account, eg:
3 'http://github.com/cltk' + name
4 """
5
6 LATIN_CORPORA = [
7 {'encoding': 'utf-8',
8 'markup': 'tei_xml',
9 'location': 'remote',
10 'type': 'text',
11 'name': 'latin_text_perseus',
12 'origin': 'https://github.com/cltk/latin_text_perseus.git'},
13 {'encoding': 'utf-8',
14 'markup': 'xml',
15 'name': 'latin_treebank_perseus',
16 'origin': 'https://github.com/cltk/latin_treebank_perseus.git',
17 'location': 'remote',
18 'type': 'treebank'},
19 {'encoding': 'utf-8',
20 'markup': 'plaintext',
21 'name': 'latin_treebank_perseus',
22 'origin': 'https://github.com/cltk/latin_treebank_perseus.git',
23 'location': 'remote',
24 'type': 'text'},
25 {'encoding': 'utf-8',
26 'markup': 'plaintext',
27 'name': 'latin_text_latin_library',
28 'origin': 'https://github.com/cltk/latin_text_latin_library.git',
29 'location': 'remote',
30 'type': 'text'},
31 {'encoding': 'latin-1',
32 'markup': 'beta_code',
33 'name': '',
34 'location': 'local',
35 'name': 'phi5',
36 'origin': None,
37 'type': 'text'},
38 {'encoding': 'latin-1',
39 'markup': 'beta_code',
40 'origin': None,
41 'name': 'phi7',
42 'location': 'local',
43 'type': 'text'},
44 {'encoding': 'utf-8',
45 'markup': 'plaintext',
46 'name': 'latin_proper_names_cltk',
47 'origin': 'https://github.com/cltk/latin_proper_names_cltk.git',
48 'location': 'remote',
49 'type': 'lexicon'},
50 {'origin': 'https://github.com/cltk/latin_models_cltk.git',
51 'name': 'latin_models_cltk',
52 'location': 'remote',
53 'type': 'model'},
54 {'encoding': 'utf-8',
55 'markup': 'python',
56 'name': 'latin_pos_lemmata_cltk',
57 'origin': 'https://github.com/cltk/latin_pos_lemmata_cltk.git',
58 'location': 'remote',
59 'type': 'lemma'},
60 {'encoding': 'utf-8',
61 'markup': 'xml',
62 'name': 'latin_treebank_index_thomisticus',
63 'origin': 'https://github.com/cltk/latin_treebank_index_thomisticus.git',
64 'location': 'remote',
65 'type': 'treebank'},
66 {'encoding': 'xml',
67 'markup': 'plaintext',
68 'name': 'latin_lexica_perseus',
69 'origin': 'https://github.com/cltk/latin_lexica_perseus.git',
70 'location': 'remote',
71 'type': 'lexicon'},
72 {'encoding': 'utf-8',
73 'markup': 'plaintext',
74 'name': 'latin_training_set_sentence_cltk',
75 'origin': 'https://github.com/cltk/latin_training_set_sentence_cltk.git',
76 'location': 'remote',
77 'type': 'training_set'},
78 {'origin': 'https://github.com/cltk/latin_word2vec_cltk.git',
79 'name': 'latin_word2vec_cltk',
80 'location': 'remote',
81 'type': 'model'},
82 {'encoding': 'utf-8',
83 'markup': 'tei_xml',
84 'location': 'remote',
85 'type': 'text',
86 'name': 'latin_text_antique_digiliblt',
87 'origin': 'https://github.com/cltk/latin_text_antique_digiliblt.git'},
88 {'location': 'remote',
89 'type': 'text',
90 'name': 'latin_text_corpus_grammaticorum_latinorum',
91 'origin': 'https://github.com/cltk/latin_text_corpus_grammaticorum_latinorum.git'}
92 ]
93
[end of cltk/corpus/latin/corpora.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cltk/corpus/latin/corpora.py b/cltk/corpus/latin/corpora.py
--- a/cltk/corpus/latin/corpora.py
+++ b/cltk/corpus/latin/corpora.py
@@ -88,5 +88,9 @@
{'location': 'remote',
'type': 'text',
'name': 'latin_text_corpus_grammaticorum_latinorum',
- 'origin': 'https://github.com/cltk/latin_text_corpus_grammaticorum_latinorum.git'}
+ 'origin': 'https://github.com/cltk/latin_text_corpus_grammaticorum_latinorum.git'},
+ {'location': 'remote',
+ 'type': 'text',
+ 'name': 'latin_text_poeti_ditalia',
+ 'origin': 'https://github.com/cltk/latin_text_poeti_ditalia.git'}
]
| {"golden_diff": "diff --git a/cltk/corpus/latin/corpora.py b/cltk/corpus/latin/corpora.py\n--- a/cltk/corpus/latin/corpora.py\n+++ b/cltk/corpus/latin/corpora.py\n@@ -88,5 +88,9 @@\n {'location': 'remote',\n 'type': 'text',\n 'name': 'latin_text_corpus_grammaticorum_latinorum',\n- 'origin': 'https://github.com/cltk/latin_text_corpus_grammaticorum_latinorum.git'}\n+ 'origin': 'https://github.com/cltk/latin_text_corpus_grammaticorum_latinorum.git'},\n+ {'location': 'remote',\n+ 'type': 'text',\n+ 'name': 'latin_text_poeti_ditalia',\n+ 'origin': 'https://github.com/cltk/latin_text_poeti_ditalia.git'}\n ]\n", "issue": "Compile Poeti d\u2019Italia for CLTK\nhttp://www.mqdq.it/mqdq/poetiditalia/indice_autori_alfa.jsp?scelta=AZ&path=metri_opere\n\n", "before_files": [{"content": "\"\"\"Latin language corpora available for download or loading locally.\nAll remote corpora hosted by github on the cltk organization account, eg:\n'http://github.com/cltk' + name\n\"\"\"\n\nLATIN_CORPORA = [\n {'encoding': 'utf-8',\n 'markup': 'tei_xml',\n 'location': 'remote',\n 'type': 'text',\n 'name': 'latin_text_perseus',\n 'origin': 'https://github.com/cltk/latin_text_perseus.git'},\n {'encoding': 'utf-8',\n 'markup': 'xml',\n 'name': 'latin_treebank_perseus',\n 'origin': 'https://github.com/cltk/latin_treebank_perseus.git',\n 'location': 'remote',\n 'type': 'treebank'},\n {'encoding': 'utf-8',\n 'markup': 'plaintext',\n 'name': 'latin_treebank_perseus',\n 'origin': 'https://github.com/cltk/latin_treebank_perseus.git',\n 'location': 'remote',\n 'type': 'text'},\n {'encoding': 'utf-8',\n 'markup': 'plaintext',\n 'name': 'latin_text_latin_library',\n 'origin': 'https://github.com/cltk/latin_text_latin_library.git',\n 'location': 'remote',\n 'type': 'text'},\n {'encoding': 'latin-1',\n 'markup': 'beta_code',\n 'name': '',\n 'location': 'local',\n 'name': 'phi5',\n 'origin': None,\n 'type': 'text'},\n {'encoding': 'latin-1',\n 'markup': 'beta_code',\n 'origin': None,\n 'name': 'phi7',\n 'location': 'local',\n 'type': 'text'},\n {'encoding': 'utf-8',\n 'markup': 'plaintext',\n 'name': 'latin_proper_names_cltk',\n 'origin': 'https://github.com/cltk/latin_proper_names_cltk.git',\n 'location': 'remote',\n 'type': 'lexicon'},\n {'origin': 'https://github.com/cltk/latin_models_cltk.git',\n 'name': 'latin_models_cltk',\n 'location': 'remote',\n 'type': 'model'},\n {'encoding': 'utf-8',\n 'markup': 'python',\n 'name': 'latin_pos_lemmata_cltk',\n 'origin': 'https://github.com/cltk/latin_pos_lemmata_cltk.git',\n 'location': 'remote',\n 'type': 'lemma'},\n {'encoding': 'utf-8',\n 'markup': 'xml',\n 'name': 'latin_treebank_index_thomisticus',\n 'origin': 'https://github.com/cltk/latin_treebank_index_thomisticus.git',\n 'location': 'remote',\n 'type': 'treebank'},\n {'encoding': 'xml',\n 'markup': 'plaintext',\n 'name': 'latin_lexica_perseus',\n 'origin': 'https://github.com/cltk/latin_lexica_perseus.git',\n 'location': 'remote',\n 'type': 'lexicon'},\n {'encoding': 'utf-8',\n 'markup': 'plaintext',\n 'name': 'latin_training_set_sentence_cltk',\n 'origin': 'https://github.com/cltk/latin_training_set_sentence_cltk.git',\n 'location': 'remote',\n 'type': 'training_set'},\n {'origin': 'https://github.com/cltk/latin_word2vec_cltk.git',\n 'name': 'latin_word2vec_cltk',\n 'location': 'remote',\n 'type': 'model'},\n {'encoding': 'utf-8',\n 'markup': 'tei_xml',\n 'location': 'remote',\n 'type': 'text',\n 'name': 'latin_text_antique_digiliblt',\n 'origin': 'https://github.com/cltk/latin_text_antique_digiliblt.git'},\n {'location': 
'remote',\n 'type': 'text',\n 'name': 'latin_text_corpus_grammaticorum_latinorum',\n 'origin': 'https://github.com/cltk/latin_text_corpus_grammaticorum_latinorum.git'}\n]\n", "path": "cltk/corpus/latin/corpora.py"}]} | 1,691 | 201 |
gh_patches_debug_9824 | rasdani/github-patches | git_diff | coala__coala-1585 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DefaultArgParser: Fix spelling mistake
Change analaysis on line 30 to analysis
</issue>
<code>
[start of coalib/parsing/DefaultArgParser.py]
1 import argparse
2 import sys
3
4 from coalib.misc import Constants
5
6
7 def default_arg_parser(formatter_class=None):
8 """
9 This function creates an ArgParser to parse command line arguments.
10
11 :param formatter_class: Formatting the arg_parser output into a specific
12 form. For example: In the manpage format.
13 """
14 formatter_class = formatter_class or argparse.RawDescriptionHelpFormatter
15
16 entry_point = sys.argv[0]
17 for entry in ['coala-ci', 'coala-dbus', 'coala-format', 'coala-json',
18 'coala-delete-orig']:
19 if entry_point.endswith(entry):
20 parser_type = entry
21 break
22 else:
23 parser_type = 'coala'
24
25 arg_parser = argparse.ArgumentParser(
26 formatter_class=formatter_class,
27 prog="coala",
28 description="coala is a simple COde AnaLysis Application. Its goal "
29 "is to make static code analysis easy and convenient "
30 "for all languages. coala uses bears, which are analaysis "
31 "routines that can be combined arbitrarily.")
32
33 arg_parser.add_argument('TARGETS',
34 nargs='*',
35 help="Sections to be executed exclusively.")
36 arg_parser.add_argument('-c',
37 '--config',
38 nargs=1,
39 metavar='FILE',
40 help='Configuration file to be used, defaults to '
41 + repr(Constants.default_coafile))
42 FIND_CONFIG_HELP = ('Attempt to find config file by checking parent '
43 'directories of the current working directory. It is '
44 'assumed that the config file is named '
45 + repr(Constants.default_coafile) + '. This arg is '
46 'ignored if --config is also given')
47 arg_parser.add_argument('-F',
48 '--find-config',
49 nargs='?',
50 const=True,
51 metavar='BOOL',
52 help=FIND_CONFIG_HELP)
53 arg_parser.add_argument('-f',
54 '--files',
55 nargs='+',
56 metavar='FILE',
57 help='Files that should be checked')
58 arg_parser.add_argument('-i',
59 '--ignore',
60 nargs='+',
61 metavar='FILE',
62 help='Files that should be ignored')
63 arg_parser.add_argument('--limit-files',
64 nargs='+',
65 metavar='FILE',
66 help='Files that will be analyzed will be '
67 'restricted to those in the globs listed '
68 'in this argument as well the files setting')
69 arg_parser.add_argument('-b',
70 '--bears',
71 nargs='+',
72 metavar='NAME',
73 help='Names of bears to use')
74 BEAR_DIRS_HELP = 'Additional directories where bears may lie'
75 arg_parser.add_argument('-d',
76 '--bear-dirs',
77 nargs='+',
78 metavar='DIR',
79 help=BEAR_DIRS_HELP)
80 LOG_LEVEL_HELP = ("Enum('ERROR','INFO','WARNING','DEBUG') to set level of "
81 "log output")
82 arg_parser.add_argument('-L',
83 '--log-level',
84 nargs=1,
85 choices=['ERROR', 'INFO', 'WARNING', 'DEBUG'],
86 metavar='ENUM',
87 help=LOG_LEVEL_HELP)
88 MIN_SEVERITY_HELP = ("Enum('INFO', 'NORMAL', 'MAJOR') to set the minimal "
89 "result severity.")
90 arg_parser.add_argument('-m',
91 '--min-severity',
92 nargs=1,
93 choices=('INFO', 'NORMAL', 'MAJOR'),
94 metavar='ENUM',
95 help=MIN_SEVERITY_HELP)
96 SETTINGS_HELP = 'Arbitrary settings in the form of section.key=value'
97 arg_parser.add_argument('-S',
98 '--settings',
99 nargs='+',
100 metavar='SETTING',
101 help=SETTINGS_HELP)
102 if parser_type == 'coala-json':
103 arg_parser.add_argument('--text-logs',
104 nargs='?',
105 const=True,
106 metavar='BOOL',
107 help='Don\'t display logs as json, display '
108 'them as we normally do in the console.')
109 if parser_type == 'coala':
110 SHOW_BEARS_HELP = ("Display bears and its metadata with the sections "
111 "that they belong to")
112 arg_parser.add_argument('-B',
113 '--show-bears',
114 nargs='?',
115 const=True,
116 metavar='BOOL',
117 help=SHOW_BEARS_HELP)
118 arg_parser.add_argument('-A',
119 '--show-all-bears',
120 nargs='?',
121 const=True,
122 metavar='BOOL',
123 help="Display all bears.")
124 SAVE_HELP = ('Filename of file to be saved to, if provided with no '
125 'arguments, settings will be stored back to the file given '
126 'by -c')
127 arg_parser.add_argument('-s',
128 '--save',
129 nargs='?',
130 const=True,
131 metavar='FILE',
132 help=SAVE_HELP)
133 TAG_HELP = ('Tag results with a specific name. You can access the results'
134 ' later with that tag.')
135 arg_parser.add_argument('-t',
136 '--tag',
137 nargs='?',
138 const=True,
139 metavar='STRING',
140 help=TAG_HELP)
141
142 DELETE_TAG_HELP = 'Delete pre-tagged results with tag name.'
143 arg_parser.add_argument('-g',
144 '--dtag',
145 nargs='?',
146 const=True,
147 metavar='STRING',
148 help=DELETE_TAG_HELP)
149
150 arg_parser.add_argument("-j",
151 "--jobs",
152 type=int,
153 help="Number of jobs to use in parallel.")
154
155 arg_parser.add_argument('-v',
156 '--version',
157 action='version',
158 version=Constants.VERSION)
159
160 arg_parser.add_argument('-n',
161 '--no-orig',
162 nargs='?',
163 const=True,
164 help="Deactivate creation of .orig files,"
165 ".orig backup files before applying patches")
166
167 return arg_parser
168
[end of coalib/parsing/DefaultArgParser.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/coalib/parsing/DefaultArgParser.py b/coalib/parsing/DefaultArgParser.py
--- a/coalib/parsing/DefaultArgParser.py
+++ b/coalib/parsing/DefaultArgParser.py
@@ -27,7 +27,7 @@
prog="coala",
description="coala is a simple COde AnaLysis Application. Its goal "
"is to make static code analysis easy and convenient "
- "for all languages. coala uses bears, which are analaysis "
+ "for all languages. coala uses bears, which are analysis "
"routines that can be combined arbitrarily.")
arg_parser.add_argument('TARGETS',
| {"golden_diff": "diff --git a/coalib/parsing/DefaultArgParser.py b/coalib/parsing/DefaultArgParser.py\n--- a/coalib/parsing/DefaultArgParser.py\n+++ b/coalib/parsing/DefaultArgParser.py\n@@ -27,7 +27,7 @@\n prog=\"coala\",\n description=\"coala is a simple COde AnaLysis Application. Its goal \"\n \"is to make static code analysis easy and convenient \"\n- \"for all languages. coala uses bears, which are analaysis \"\n+ \"for all languages. coala uses bears, which are analysis \"\n \"routines that can be combined arbitrarily.\")\n \n arg_parser.add_argument('TARGETS',\n", "issue": "DefaultArgParser: Fix spelling mistake\nChange analaysis on line 30 to analysis\n\n", "before_files": [{"content": "import argparse\nimport sys\n\nfrom coalib.misc import Constants\n\n\ndef default_arg_parser(formatter_class=None):\n \"\"\"\n This function creates an ArgParser to parse command line arguments.\n\n :param formatter_class: Formatting the arg_parser output into a specific\n form. For example: In the manpage format.\n \"\"\"\n formatter_class = formatter_class or argparse.RawDescriptionHelpFormatter\n\n entry_point = sys.argv[0]\n for entry in ['coala-ci', 'coala-dbus', 'coala-format', 'coala-json',\n 'coala-delete-orig']:\n if entry_point.endswith(entry):\n parser_type = entry\n break\n else:\n parser_type = 'coala'\n\n arg_parser = argparse.ArgumentParser(\n formatter_class=formatter_class,\n prog=\"coala\",\n description=\"coala is a simple COde AnaLysis Application. Its goal \"\n \"is to make static code analysis easy and convenient \"\n \"for all languages. coala uses bears, which are analaysis \"\n \"routines that can be combined arbitrarily.\")\n\n arg_parser.add_argument('TARGETS',\n nargs='*',\n help=\"Sections to be executed exclusively.\")\n arg_parser.add_argument('-c',\n '--config',\n nargs=1,\n metavar='FILE',\n help='Configuration file to be used, defaults to '\n + repr(Constants.default_coafile))\n FIND_CONFIG_HELP = ('Attempt to find config file by checking parent '\n 'directories of the current working directory. It is '\n 'assumed that the config file is named '\n + repr(Constants.default_coafile) + '. 
This arg is '\n 'ignored if --config is also given')\n arg_parser.add_argument('-F',\n '--find-config',\n nargs='?',\n const=True,\n metavar='BOOL',\n help=FIND_CONFIG_HELP)\n arg_parser.add_argument('-f',\n '--files',\n nargs='+',\n metavar='FILE',\n help='Files that should be checked')\n arg_parser.add_argument('-i',\n '--ignore',\n nargs='+',\n metavar='FILE',\n help='Files that should be ignored')\n arg_parser.add_argument('--limit-files',\n nargs='+',\n metavar='FILE',\n help='Files that will be analyzed will be '\n 'restricted to those in the globs listed '\n 'in this argument as well the files setting')\n arg_parser.add_argument('-b',\n '--bears',\n nargs='+',\n metavar='NAME',\n help='Names of bears to use')\n BEAR_DIRS_HELP = 'Additional directories where bears may lie'\n arg_parser.add_argument('-d',\n '--bear-dirs',\n nargs='+',\n metavar='DIR',\n help=BEAR_DIRS_HELP)\n LOG_LEVEL_HELP = (\"Enum('ERROR','INFO','WARNING','DEBUG') to set level of \"\n \"log output\")\n arg_parser.add_argument('-L',\n '--log-level',\n nargs=1,\n choices=['ERROR', 'INFO', 'WARNING', 'DEBUG'],\n metavar='ENUM',\n help=LOG_LEVEL_HELP)\n MIN_SEVERITY_HELP = (\"Enum('INFO', 'NORMAL', 'MAJOR') to set the minimal \"\n \"result severity.\")\n arg_parser.add_argument('-m',\n '--min-severity',\n nargs=1,\n choices=('INFO', 'NORMAL', 'MAJOR'),\n metavar='ENUM',\n help=MIN_SEVERITY_HELP)\n SETTINGS_HELP = 'Arbitrary settings in the form of section.key=value'\n arg_parser.add_argument('-S',\n '--settings',\n nargs='+',\n metavar='SETTING',\n help=SETTINGS_HELP)\n if parser_type == 'coala-json':\n arg_parser.add_argument('--text-logs',\n nargs='?',\n const=True,\n metavar='BOOL',\n help='Don\\'t display logs as json, display '\n 'them as we normally do in the console.')\n if parser_type == 'coala':\n SHOW_BEARS_HELP = (\"Display bears and its metadata with the sections \"\n \"that they belong to\")\n arg_parser.add_argument('-B',\n '--show-bears',\n nargs='?',\n const=True,\n metavar='BOOL',\n help=SHOW_BEARS_HELP)\n arg_parser.add_argument('-A',\n '--show-all-bears',\n nargs='?',\n const=True,\n metavar='BOOL',\n help=\"Display all bears.\")\n SAVE_HELP = ('Filename of file to be saved to, if provided with no '\n 'arguments, settings will be stored back to the file given '\n 'by -c')\n arg_parser.add_argument('-s',\n '--save',\n nargs='?',\n const=True,\n metavar='FILE',\n help=SAVE_HELP)\n TAG_HELP = ('Tag results with a specific name. You can access the results'\n ' later with that tag.')\n arg_parser.add_argument('-t',\n '--tag',\n nargs='?',\n const=True,\n metavar='STRING',\n help=TAG_HELP)\n\n DELETE_TAG_HELP = 'Delete pre-tagged results with tag name.'\n arg_parser.add_argument('-g',\n '--dtag',\n nargs='?',\n const=True,\n metavar='STRING',\n help=DELETE_TAG_HELP)\n\n arg_parser.add_argument(\"-j\",\n \"--jobs\",\n type=int,\n help=\"Number of jobs to use in parallel.\")\n\n arg_parser.add_argument('-v',\n '--version',\n action='version',\n version=Constants.VERSION)\n\n arg_parser.add_argument('-n',\n '--no-orig',\n nargs='?',\n const=True,\n help=\"Deactivate creation of .orig files,\"\n \".orig backup files before applying patches\")\n\n return arg_parser\n", "path": "coalib/parsing/DefaultArgParser.py"}]} | 2,160 | 154 |
gh_patches_debug_18951 | rasdani/github-patches | git_diff | ansible__ansible-23067 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
win_domain_controller: documentation error in examples '_pass' should be '_password'
<!---
Verify first that your issue/request is not already reported on GitHub.
Also test if the latest release, and master branch are affected too.
-->
##### ISSUE TYPE
<!--- Pick one below and delete the rest: -->
- Documentation Report
##### COMPONENT NAME
<!--- Name of the module/plugin/task/feature -->
win_domain_controller
##### ANSIBLE VERSION
<!--- Paste verbatim output from “ansible --version” between quotes below -->
```
ansible 2.3.0.0 (detached HEAD e4494f85b6) last updated 2017/03/17 12:34:17 (GMT +100)
config file = /home/jon/ansible/ansible.cfg
configured module search path = [u'/home/jon/ansible/library']
python version = 2.7.6 (default, Jun 22 2015, 17:58:13) [GCC 4.8.2]
```
##### CONFIGURATION
<!---
Mention any settings you have changed/added/removed in ansible.cfg
(or using the ANSIBLE_* environment variables).
-->
##### OS / ENVIRONMENT
<!---
Mention the OS you are running Ansible from, and the OS you are
managing, or say “N/A” for anything that is not platform-specific.
-->
Ansible controller Windows 10 WSL (ubuntu)
targets: Windows Server 2012 R2
##### SUMMARY
<!--- Explain the problem briefly -->
The examples need updating to match the correct parameter names for the _password parameters.
For example in the examples domain_admin_pass, but the module actually expects 'domain_admin_password'. The same thing is true for 'safe_mode_password' and 'local_admin_password'.
Would create a PR for this but am away from dev machine at the moment.
##### STEPS TO REPRODUCE
<!---
For bugs, show exactly how to reproduce the problem, using a minimal test-case.
For new features, show how the feature would be used.
-->
N/A
<!--- Paste example playbooks or commands between quotes below -->
```yaml
```
<!--- You can also paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
<!--- What did you expect to happen when running the steps above? -->
N/A
##### ACTUAL RESULTS
<!--- What actually happened? If possible run with extra verbosity (-vvvv) -->
N/A
<!--- Paste verbatim command output between quotes below -->
```
```
</issue>
<code>
[start of lib/ansible/modules/windows/win_domain_controller.py]
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # (c) 2017, Red Hat, Inc.
5 #
6 # This file is part of Ansible
7 #
8 # Ansible is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU General Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12 #
13 # Ansible is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
17 #
18 # You should have received a copy of the GNU General Public License
19 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
20
21 ANSIBLE_METADATA = {'metadata_version': '1.0',
22 'status': ['preview'],
23 'supported_by': 'core'}
24
25
26 DOCUMENTATION='''
27 module: win_domain_controller
28 short_description: Manage domain controller/member server state for a Windows host
29 version_added: 2.3
30 description:
31 - Ensure that a Windows Server 2012+ host is configured as a domain controller or demoted to member server. This module may require
32 subsequent use of the M(win_reboot) action if changes are made.
33 options:
34 dns_domain_name:
35 description:
36 - when C(state) is C(domain_controller), the DNS name of the domain for which the targeted Windows host should be a DC
37 domain_admin_user:
38 description:
39 - username of a domain admin for the target domain (necessary to promote or demote a domain controller)
40 required: true
41 domain_admin_password:
42 description:
43 - password for the specified C(domain_admin_user)
44 required: true
45 safe_mode_password:
46 description:
47 - safe mode password for the domain controller (required when C(state) is C(domain_controller))
48 local_admin_password:
49 description:
50 - password to be assigned to the local C(Administrator) user (required when C(state) is C(member_server))
51 state:
52 description:
53 - whether the target host should be a domain controller or a member server
54 choices:
55 - domain_controller
56 - member_server
57 author:
58 - Matt Davis (@nitzmahone)
59 '''
60
61 RETURN='''
62 reboot_required:
63 description: True if changes were made that require a reboot.
64 returned: always
65 type: boolean
66 sample: true
67
68 '''
69
70 EXAMPLES=r'''
71 # ensure a server is a domain controller
72 - hosts: winclient
73 gather_facts: no
74 tasks:
75 - win_domain_controller:
76 dns_domain_name: ansible.vagrant
77 domain_admin_user: [email protected]
78 domain_admin_pass: password123!
79 safe_mode_pass: password123!
80 state: domain_controller
81 log_path: c:\ansible_win_domain_controller.txt
82
83 # ensure a server is not a domain controller
84 # note that without an action wrapper, in the case where a DC is demoted,
85 # the task will fail with a 401 Unauthorized, because the domain credential
86 # becomes invalid to fetch the final output over WinRM. This requires win_async
87 # with credential switching (or other clever credential-switching
88 # mechanism to get the output and trigger the required reboot)
89 - hosts: winclient
90 gather_facts: no
91 tasks:
92 - win_domain_controller:
93 domain_admin_user: [email protected]
94 domain_admin_pass: password123!
95 local_admin_pass: password123!
96 state: member_server
97 log_path: c:\ansible_win_domain_controller.txt
98
99 '''
100
101
[end of lib/ansible/modules/windows/win_domain_controller.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/ansible/modules/windows/win_domain_controller.py b/lib/ansible/modules/windows/win_domain_controller.py
--- a/lib/ansible/modules/windows/win_domain_controller.py
+++ b/lib/ansible/modules/windows/win_domain_controller.py
@@ -75,8 +75,8 @@
- win_domain_controller:
dns_domain_name: ansible.vagrant
domain_admin_user: [email protected]
- domain_admin_pass: password123!
- safe_mode_pass: password123!
+ domain_admin_password: password123!
+ safe_mode_password: password123!
state: domain_controller
log_path: c:\ansible_win_domain_controller.txt
@@ -91,8 +91,8 @@
tasks:
- win_domain_controller:
domain_admin_user: [email protected]
- domain_admin_pass: password123!
- local_admin_pass: password123!
+ domain_admin_password: password123!
+ local_admin_password: password123!
state: member_server
log_path: c:\ansible_win_domain_controller.txt
| {"golden_diff": "diff --git a/lib/ansible/modules/windows/win_domain_controller.py b/lib/ansible/modules/windows/win_domain_controller.py\n--- a/lib/ansible/modules/windows/win_domain_controller.py\n+++ b/lib/ansible/modules/windows/win_domain_controller.py\n@@ -75,8 +75,8 @@\n - win_domain_controller:\n dns_domain_name: ansible.vagrant\n domain_admin_user: [email protected]\n- domain_admin_pass: password123!\n- safe_mode_pass: password123!\n+ domain_admin_password: password123!\n+ safe_mode_password: password123!\n state: domain_controller\n log_path: c:\\ansible_win_domain_controller.txt\n \n@@ -91,8 +91,8 @@\n tasks:\n - win_domain_controller:\n domain_admin_user: [email protected]\n- domain_admin_pass: password123!\n- local_admin_pass: password123!\n+ domain_admin_password: password123!\n+ local_admin_password: password123!\n state: member_server\n log_path: c:\\ansible_win_domain_controller.txt\n", "issue": "win_domain_controller: documentation error in examples '_pass' should be '_password'\n<!---\r\nVerify first that your issue/request is not already reported on GitHub.\r\nAlso test if the latest release, and master branch are affected too.\r\n-->\r\n\r\n##### ISSUE TYPE\r\n<!--- Pick one below and delete the rest: -->\r\n\r\n - Documentation Report\r\n\r\n##### COMPONENT NAME\r\n<!--- Name of the module/plugin/task/feature -->\r\nwin_domain_controller\r\n##### ANSIBLE VERSION\r\n<!--- Paste verbatim output from \u201cansible --version\u201d between quotes below -->\r\n```\r\nansible 2.3.0.0 (detached HEAD e4494f85b6) last updated 2017/03/17 12:34:17 (GMT +100)\r\n config file = /home/jon/ansible/ansible.cfg\r\n configured module search path = [u'/home/jon/ansible/library']\r\n python version = 2.7.6 (default, Jun 22 2015, 17:58:13) [GCC 4.8.2]\r\n```\r\n\r\n##### CONFIGURATION\r\n<!---\r\nMention any settings you have changed/added/removed in ansible.cfg\r\n(or using the ANSIBLE_* environment variables).\r\n-->\r\n\r\n##### OS / ENVIRONMENT\r\n<!---\r\nMention the OS you are running Ansible from, and the OS you are\r\nmanaging, or say \u201cN/A\u201d for anything that is not platform-specific.\r\n-->\r\nAnsible controller Windows 10 WSL (ubuntu)\r\ntargets: Windows Server 2012 R2\r\n##### SUMMARY\r\n<!--- Explain the problem briefly -->\r\n\r\nThe examples need updating to match the correct parameter names for the _password parameters.\r\nFor example in the examples domain_admin_pass, but the module actually expects 'domain_admin_password'. The same thing is true for 'safe_mode_password' and 'local_admin_password'.\r\n\r\nWould create a PR for this but am away from dev machine at the moment.\r\n\r\n\r\n##### STEPS TO REPRODUCE\r\n<!---\r\nFor bugs, show exactly how to reproduce the problem, using a minimal test-case.\r\nFor new features, show how the feature would be used.\r\n-->\r\nN/A\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n\r\n```\r\n\r\n<!--- You can also paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\n<!--- What did you expect to happen when running the steps above? -->\r\nN/A\r\n##### ACTUAL RESULTS\r\n<!--- What actually happened? 
If possible run with extra verbosity (-vvvv) -->\r\nN/A\r\n<!--- Paste verbatim command output between quotes below -->\r\n```\r\n\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2017, Red Hat, Inc.\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nANSIBLE_METADATA = {'metadata_version': '1.0',\n 'status': ['preview'],\n 'supported_by': 'core'}\n\n\nDOCUMENTATION='''\nmodule: win_domain_controller\nshort_description: Manage domain controller/member server state for a Windows host\nversion_added: 2.3\ndescription:\n - Ensure that a Windows Server 2012+ host is configured as a domain controller or demoted to member server. This module may require\n subsequent use of the M(win_reboot) action if changes are made.\noptions:\n dns_domain_name:\n description:\n - when C(state) is C(domain_controller), the DNS name of the domain for which the targeted Windows host should be a DC\n domain_admin_user:\n description:\n - username of a domain admin for the target domain (necessary to promote or demote a domain controller)\n required: true\n domain_admin_password:\n description:\n - password for the specified C(domain_admin_user)\n required: true\n safe_mode_password:\n description:\n - safe mode password for the domain controller (required when C(state) is C(domain_controller))\n local_admin_password:\n description:\n - password to be assigned to the local C(Administrator) user (required when C(state) is C(member_server))\n state:\n description:\n - whether the target host should be a domain controller or a member server\n choices:\n - domain_controller\n - member_server\nauthor:\n - Matt Davis (@nitzmahone)\n'''\n\nRETURN='''\nreboot_required:\n description: True if changes were made that require a reboot.\n returned: always\n type: boolean\n sample: true\n\n'''\n\nEXAMPLES=r'''\n# ensure a server is a domain controller\n- hosts: winclient\n gather_facts: no\n tasks:\n - win_domain_controller:\n dns_domain_name: ansible.vagrant\n domain_admin_user: [email protected]\n domain_admin_pass: password123!\n safe_mode_pass: password123!\n state: domain_controller\n log_path: c:\\ansible_win_domain_controller.txt\n\n# ensure a server is not a domain controller\n# note that without an action wrapper, in the case where a DC is demoted,\n# the task will fail with a 401 Unauthorized, because the domain credential\n# becomes invalid to fetch the final output over WinRM. 
This requires win_async\n# with credential switching (or other clever credential-switching\n# mechanism to get the output and trigger the required reboot)\n- hosts: winclient\n gather_facts: no\n tasks:\n - win_domain_controller:\n domain_admin_user: [email protected]\n domain_admin_pass: password123!\n local_admin_pass: password123!\n state: member_server\n log_path: c:\\ansible_win_domain_controller.txt\n\n'''\n\n", "path": "lib/ansible/modules/windows/win_domain_controller.py"}]} | 2,092 | 246 |
gh_patches_debug_7897 | rasdani/github-patches | git_diff | nautobot__nautobot-2640 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
wrong link from circuit types to circuit
<!--
NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.
This form is only for reporting reproducible bugs. If you need assistance
with Nautobot installation, or if you have a general question, please start a
discussion instead: https://github.com/nautobot/nautobot/discussions
Please describe the environment in which you are running Nautobot. Be sure
that you are running an unmodified instance of the latest stable release
before submitting a bug report, and that any plugins have been disabled.
-->
### Environment
* Nautobot version (Docker tag too if applicable): eee34d7bc54d (v1.4.5)
* Python version: 3.10
* Database platform, version: postgresql 14
* Middleware(s):
<!--
Describe in detail the exact steps that someone else can take to reproduce
this bug using the current stable release of Nautobot. Begin with the
creation of any necessary database objects and call out every operation
being performed explicitly. If reporting a bug in the REST API, be sure to
reconstruct the raw HTTP request(s) being made: Don't rely on a client
library such as pynautobot.
-->
### Steps to Reproduce
1. Create circuit type
2. Create circuit of above circuit type
3. Go to Circuit Types
4. Click one circuit type
5. Click number of circuits belonging to circuit type
<!-- What did you expect to happen? -->
### Expected Behavior
redirect to `circuits/circuits/?type=XXX`
<!-- What happened instead? -->
### Observed Behavior
redirects to `circuits/circuits/?circuit_type=XXX`
which shows an "Invalid filters were specified" error
</issue>
<code>
[start of nautobot/circuits/tables.py]
1 import django_tables2 as tables
2 from django_tables2.utils import Accessor
3
4 from nautobot.extras.tables import StatusTableMixin
5 from nautobot.tenancy.tables import TenantColumn
6 from nautobot.utilities.tables import (
7 BaseTable,
8 ButtonsColumn,
9 TagColumn,
10 ToggleColumn,
11 )
12 from .models import Circuit, CircuitType, Provider, ProviderNetwork
13
14 CIRCUIT_TERMINATION_PARENT = """
15 {% if value.provider_network %}
16 <a href="{{ value.provider_network.get_absolute_url }}">{{ value.provider_network }}</a>
17 {% elif value.site %}
18 <a href="{{ value.site.get_absolute_url }}">{{ value.site }}</a>
19 {% endif %}
20 """
21
22 #
23 # Provider Network
24 #
25
26
27 class ProviderNetworkTable(BaseTable):
28 pk = ToggleColumn()
29 name = tables.Column(linkify=True)
30 provider = tables.Column(linkify=True)
31 tags = TagColumn(url_name="circuits:providernetwork_list")
32
33 class Meta(BaseTable.Meta):
34 model = ProviderNetwork
35 fields = ("pk", "name", "provider", "description", "tags")
36 default_columns = ("pk", "name", "provider", "description")
37
38
39 #
40 # Providers
41 #
42
43
44 class ProviderTable(BaseTable):
45 pk = ToggleColumn()
46 name = tables.LinkColumn()
47 circuit_count = tables.Column(accessor=Accessor("count_circuits"), verbose_name="Circuits")
48 tags = TagColumn(url_name="circuits:provider_list")
49
50 class Meta(BaseTable.Meta):
51 model = Provider
52 fields = (
53 "pk",
54 "name",
55 "asn",
56 "account",
57 "portal_url",
58 "noc_contact",
59 "admin_contact",
60 "circuit_count",
61 "tags",
62 )
63 default_columns = ("pk", "name", "asn", "account", "circuit_count")
64
65
66 #
67 # Circuit types
68 #
69
70
71 class CircuitTypeTable(BaseTable):
72 pk = ToggleColumn()
73 name = tables.LinkColumn()
74 circuit_count = tables.Column(verbose_name="Circuits")
75 actions = ButtonsColumn(CircuitType, pk_field="slug")
76
77 class Meta(BaseTable.Meta):
78 model = CircuitType
79 fields = ("pk", "name", "circuit_count", "description", "slug", "actions")
80 default_columns = (
81 "pk",
82 "name",
83 "circuit_count",
84 "description",
85 "slug",
86 "actions",
87 )
88
89
90 #
91 # Circuits
92 #
93
94
95 class CircuitTable(StatusTableMixin, BaseTable):
96 pk = ToggleColumn()
97 cid = tables.LinkColumn(verbose_name="ID")
98 provider = tables.LinkColumn(viewname="circuits:provider", args=[Accessor("provider__slug")])
99 tenant = TenantColumn()
100 tags = TagColumn(url_name="circuits:circuit_list")
101
102 termination_a = tables.TemplateColumn(
103 template_code=CIRCUIT_TERMINATION_PARENT,
104 accessor=Accessor("termination_a"),
105 orderable=False,
106 verbose_name="Side A",
107 )
108 termination_z = tables.TemplateColumn(
109 template_code=CIRCUIT_TERMINATION_PARENT,
110 accessor=Accessor("termination_z"),
111 orderable=False,
112 verbose_name="Side Z",
113 )
114
115 class Meta(BaseTable.Meta):
116 model = Circuit
117 fields = (
118 "pk",
119 "cid",
120 "provider",
121 "type",
122 "status",
123 "tenant",
124 "termination_a",
125 "termination_z",
126 "install_date",
127 "commit_rate",
128 "description",
129 "tags",
130 )
131 default_columns = (
132 "pk",
133 "cid",
134 "provider",
135 "type",
136 "status",
137 "tenant",
138 "termination_a",
139 "termination_z",
140 "description",
141 )
142
[end of nautobot/circuits/tables.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nautobot/circuits/tables.py b/nautobot/circuits/tables.py
--- a/nautobot/circuits/tables.py
+++ b/nautobot/circuits/tables.py
@@ -12,10 +12,13 @@
from .models import Circuit, CircuitType, Provider, ProviderNetwork
CIRCUIT_TERMINATION_PARENT = """
+{% load helpers %}
{% if value.provider_network %}
-<a href="{{ value.provider_network.get_absolute_url }}">{{ value.provider_network }}</a>
+{{ value.provider_network|hyperlinked_object }}
{% elif value.site %}
-<a href="{{ value.site.get_absolute_url }}">{{ value.site }}</a>
+{{ value.site|hyperlinked_object }}
+{% else %}
+{{ None|placeholder }}
{% endif %}
"""
| {"golden_diff": "diff --git a/nautobot/circuits/tables.py b/nautobot/circuits/tables.py\n--- a/nautobot/circuits/tables.py\n+++ b/nautobot/circuits/tables.py\n@@ -12,10 +12,13 @@\n from .models import Circuit, CircuitType, Provider, ProviderNetwork\n \n CIRCUIT_TERMINATION_PARENT = \"\"\"\n+{% load helpers %}\n {% if value.provider_network %}\n-<a href=\"{{ value.provider_network.get_absolute_url }}\">{{ value.provider_network }}</a>\n+{{ value.provider_network|hyperlinked_object }}\n {% elif value.site %}\n-<a href=\"{{ value.site.get_absolute_url }}\">{{ value.site }}</a>\n+{{ value.site|hyperlinked_object }}\n+{% else %}\n+{{ None|placeholder }}\n {% endif %}\n \"\"\"\n", "issue": "wrong link from circuit types to circuit\n<!--\r\n NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.\r\n\r\n This form is only for reporting reproducible bugs. If you need assistance\r\n with Nautobot installation, or if you have a general question, please start a\r\n discussion instead: https://github.com/nautobot/nautobot/discussions\r\n\r\n Please describe the environment in which you are running Nautobot. Be sure\r\n that you are running an unmodified instance of the latest stable release\r\n before submitting a bug report, and that any plugins have been disabled.\r\n-->\r\n### Environment\r\n* Nautobot version (Docker tag too if applicable): eee34d7bc54d (v1.4.5)\r\n* Python version: 3.10\r\n* Database platform, version: postgresql 14\r\n* Middleware(s):\r\n\r\n<!--\r\n Describe in detail the exact steps that someone else can take to reproduce\r\n this bug using the current stable release of Nautobot. Begin with the\r\n creation of any necessary database objects and call out every operation\r\n being performed explicitly. If reporting a bug in the REST API, be sure to\r\n reconstruct the raw HTTP request(s) being made: Don't rely on a client\r\n library such as pynautobot.\r\n-->\r\n### Steps to Reproduce\r\n1. Create circuit type\r\n2. Create circuit of above circuit type\r\n3. Go to Circuit Types\r\n4. Click one circuit type\r\n5. Click number of circuits belonging to circuit type\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\nredirect to `circuits/circuits/?type=XXX`\r\n\r\n<!-- What happened instead? 
-->\r\n### Observed Behavior\r\nredirects to `circuits/circuits/?circuit_type=XXX`\r\nwhich shows an \"Invalid filters were specified\" error\n", "before_files": [{"content": "import django_tables2 as tables\nfrom django_tables2.utils import Accessor\n\nfrom nautobot.extras.tables import StatusTableMixin\nfrom nautobot.tenancy.tables import TenantColumn\nfrom nautobot.utilities.tables import (\n BaseTable,\n ButtonsColumn,\n TagColumn,\n ToggleColumn,\n)\nfrom .models import Circuit, CircuitType, Provider, ProviderNetwork\n\nCIRCUIT_TERMINATION_PARENT = \"\"\"\n{% if value.provider_network %}\n<a href=\"{{ value.provider_network.get_absolute_url }}\">{{ value.provider_network }}</a>\n{% elif value.site %}\n<a href=\"{{ value.site.get_absolute_url }}\">{{ value.site }}</a>\n{% endif %}\n\"\"\"\n\n#\n# Provider Network\n#\n\n\nclass ProviderNetworkTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(linkify=True)\n provider = tables.Column(linkify=True)\n tags = TagColumn(url_name=\"circuits:providernetwork_list\")\n\n class Meta(BaseTable.Meta):\n model = ProviderNetwork\n fields = (\"pk\", \"name\", \"provider\", \"description\", \"tags\")\n default_columns = (\"pk\", \"name\", \"provider\", \"description\")\n\n\n#\n# Providers\n#\n\n\nclass ProviderTable(BaseTable):\n pk = ToggleColumn()\n name = tables.LinkColumn()\n circuit_count = tables.Column(accessor=Accessor(\"count_circuits\"), verbose_name=\"Circuits\")\n tags = TagColumn(url_name=\"circuits:provider_list\")\n\n class Meta(BaseTable.Meta):\n model = Provider\n fields = (\n \"pk\",\n \"name\",\n \"asn\",\n \"account\",\n \"portal_url\",\n \"noc_contact\",\n \"admin_contact\",\n \"circuit_count\",\n \"tags\",\n )\n default_columns = (\"pk\", \"name\", \"asn\", \"account\", \"circuit_count\")\n\n\n#\n# Circuit types\n#\n\n\nclass CircuitTypeTable(BaseTable):\n pk = ToggleColumn()\n name = tables.LinkColumn()\n circuit_count = tables.Column(verbose_name=\"Circuits\")\n actions = ButtonsColumn(CircuitType, pk_field=\"slug\")\n\n class Meta(BaseTable.Meta):\n model = CircuitType\n fields = (\"pk\", \"name\", \"circuit_count\", \"description\", \"slug\", \"actions\")\n default_columns = (\n \"pk\",\n \"name\",\n \"circuit_count\",\n \"description\",\n \"slug\",\n \"actions\",\n )\n\n\n#\n# Circuits\n#\n\n\nclass CircuitTable(StatusTableMixin, BaseTable):\n pk = ToggleColumn()\n cid = tables.LinkColumn(verbose_name=\"ID\")\n provider = tables.LinkColumn(viewname=\"circuits:provider\", args=[Accessor(\"provider__slug\")])\n tenant = TenantColumn()\n tags = TagColumn(url_name=\"circuits:circuit_list\")\n\n termination_a = tables.TemplateColumn(\n template_code=CIRCUIT_TERMINATION_PARENT,\n accessor=Accessor(\"termination_a\"),\n orderable=False,\n verbose_name=\"Side A\",\n )\n termination_z = tables.TemplateColumn(\n template_code=CIRCUIT_TERMINATION_PARENT,\n accessor=Accessor(\"termination_z\"),\n orderable=False,\n verbose_name=\"Side Z\",\n )\n\n class Meta(BaseTable.Meta):\n model = Circuit\n fields = (\n \"pk\",\n \"cid\",\n \"provider\",\n \"type\",\n \"status\",\n \"tenant\",\n \"termination_a\",\n \"termination_z\",\n \"install_date\",\n \"commit_rate\",\n \"description\",\n \"tags\",\n )\n default_columns = (\n \"pk\",\n \"cid\",\n \"provider\",\n \"type\",\n \"status\",\n \"tenant\",\n \"termination_a\",\n \"termination_z\",\n \"description\",\n )\n", "path": "nautobot/circuits/tables.py"}]} | 2,047 | 168 |
gh_patches_debug_4998 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-611 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Expand book data task is re-setting default editions
it should not.
</issue>
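The fix that was eventually merged (see the golden diff at the end of this entry) is a one-line guard in `create_edition_from_data`: only adopt the freshly loaded edition as the work's default when no default edition exists yet. A minimal sketch of that guard, assuming `work` and `edition` are the BookWyrm `Work`/`Edition` model instances used in the file below:

```python
def adopt_default_edition(work, edition):
    # Sketch only: keep an existing default_edition untouched so that
    # background tasks like expand_book_data cannot overwrite it.
    if not work.default_edition:
        work.default_edition = edition
        work.save()
```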
<code>
[start of bookwyrm/connectors/abstract_connector.py]
1 ''' functionality outline for a book data connector '''
2 from abc import ABC, abstractmethod
3 from dataclasses import asdict, dataclass
4 import logging
5 from urllib3.exceptions import RequestError
6
7 from django.db import transaction
8 import requests
9 from requests.exceptions import SSLError
10
11 from bookwyrm import activitypub, models, settings
12 from .connector_manager import load_more_data, ConnectorException
13
14
15 logger = logging.getLogger(__name__)
16 class AbstractMinimalConnector(ABC):
17 ''' just the bare bones, for other bookwyrm instances '''
18 def __init__(self, identifier):
19 # load connector settings
20 info = models.Connector.objects.get(identifier=identifier)
21 self.connector = info
22
23 # the things in the connector model to copy over
24 self_fields = [
25 'base_url',
26 'books_url',
27 'covers_url',
28 'search_url',
29 'max_query_count',
30 'name',
31 'identifier',
32 'local'
33 ]
34 for field in self_fields:
35 setattr(self, field, getattr(info, field))
36
37 def search(self, query, min_confidence=None):
38 ''' free text search '''
39 params = {}
40 if min_confidence:
41 params['min_confidence'] = min_confidence
42
43 resp = requests.get(
44 '%s%s' % (self.search_url, query),
45 params=params,
46 headers={
47 'Accept': 'application/json; charset=utf-8',
48 'User-Agent': settings.USER_AGENT,
49 },
50 )
51 if not resp.ok:
52 resp.raise_for_status()
53 try:
54 data = resp.json()
55 except ValueError as e:
56 logger.exception(e)
57 raise ConnectorException('Unable to parse json response', e)
58 results = []
59
60 for doc in self.parse_search_data(data)[:10]:
61 results.append(self.format_search_result(doc))
62 return results
63
64 @abstractmethod
65 def get_or_create_book(self, remote_id):
66 ''' pull up a book record by whatever means possible '''
67
68 @abstractmethod
69 def parse_search_data(self, data):
70 ''' turn the result json from a search into a list '''
71
72 @abstractmethod
73 def format_search_result(self, search_result):
74 ''' create a SearchResult obj from json '''
75
76
77 class AbstractConnector(AbstractMinimalConnector):
78 ''' generic book data connector '''
79 def __init__(self, identifier):
80 super().__init__(identifier)
81 # fields we want to look for in book data to copy over
82 # title we handle separately.
83 self.book_mappings = []
84
85
86 def is_available(self):
87 ''' check if you're allowed to use this connector '''
88 if self.max_query_count is not None:
89 if self.connector.query_count >= self.max_query_count:
90 return False
91 return True
92
93
94 def get_or_create_book(self, remote_id):
95 ''' translate arbitrary json into an Activitypub dataclass '''
96 # first, check if we have the origin_id saved
97 existing = models.Edition.find_existing_by_remote_id(remote_id) or \
98 models.Work.find_existing_by_remote_id(remote_id)
99 if existing:
100 if hasattr(existing, 'get_default_editon'):
101 return existing.get_default_editon()
102 return existing
103
104 # load the json
105 data = get_data(remote_id)
106 mapped_data = dict_from_mappings(data, self.book_mappings)
107 if self.is_work_data(data):
108 try:
109 edition_data = self.get_edition_from_work_data(data)
110 except (KeyError, ConnectorException):
111 # hack: re-use the work data as the edition data
112 # this is why remote ids aren't necessarily unique
113 edition_data = data
114 work_data = mapped_data
115 else:
116 try:
117 work_data = self.get_work_from_edition_data(data)
118 work_data = dict_from_mappings(work_data, self.book_mappings)
119 except (KeyError, ConnectorException):
120 work_data = mapped_data
121 edition_data = data
122
123 if not work_data or not edition_data:
124 raise ConnectorException('Unable to load book data: %s' % remote_id)
125
126 with transaction.atomic():
127 # create activitypub object
128 work_activity = activitypub.Work(**work_data)
129 # this will dedupe automatically
130 work = work_activity.to_model(models.Work)
131 for author in self.get_authors_from_data(data):
132 work.authors.add(author)
133
134 edition = self.create_edition_from_data(work, edition_data)
135 load_more_data.delay(self.connector.id, work.id)
136 return edition
137
138
139 def create_edition_from_data(self, work, edition_data):
140 ''' if we already have the work, we're ready '''
141 mapped_data = dict_from_mappings(edition_data, self.book_mappings)
142 mapped_data['work'] = work.remote_id
143 edition_activity = activitypub.Edition(**mapped_data)
144 edition = edition_activity.to_model(models.Edition)
145 edition.connector = self.connector
146 edition.save()
147
148 work.default_edition = edition
149 work.save()
150
151 for author in self.get_authors_from_data(edition_data):
152 edition.authors.add(author)
153 if not edition.authors.exists() and work.authors.exists():
154 edition.authors.set(work.authors.all())
155
156 return edition
157
158
159 def get_or_create_author(self, remote_id):
160 ''' load that author '''
161 existing = models.Author.find_existing_by_remote_id(remote_id)
162 if existing:
163 return existing
164
165 data = get_data(remote_id)
166
167 mapped_data = dict_from_mappings(data, self.author_mappings)
168 activity = activitypub.Author(**mapped_data)
169 # this will dedupe
170 return activity.to_model(models.Author)
171
172
173 @abstractmethod
174 def is_work_data(self, data):
175 ''' differentiate works and editions '''
176
177 @abstractmethod
178 def get_edition_from_work_data(self, data):
179 ''' every work needs at least one edition '''
180
181 @abstractmethod
182 def get_work_from_edition_data(self, data):
183 ''' every edition needs a work '''
184
185 @abstractmethod
186 def get_authors_from_data(self, data):
187 ''' load author data '''
188
189 @abstractmethod
190 def expand_book_data(self, book):
191 ''' get more info on a book '''
192
193
194 def dict_from_mappings(data, mappings):
195 ''' create a dict in Activitypub format, using mappings supplies by
196 the subclass '''
197 result = {}
198 for mapping in mappings:
199 result[mapping.local_field] = mapping.get_value(data)
200 return result
201
202
203 def get_data(url):
204 ''' wrapper for request.get '''
205 try:
206 resp = requests.get(
207 url,
208 headers={
209 'Accept': 'application/json; charset=utf-8',
210 'User-Agent': settings.USER_AGENT,
211 },
212 )
213 except (RequestError, SSLError) as e:
214 logger.exception(e)
215 raise ConnectorException()
216
217 if not resp.ok:
218 try:
219 resp.raise_for_status()
220 except requests.exceptions.HTTPError as e:
221 logger.exception(e)
222 raise ConnectorException()
223 try:
224 data = resp.json()
225 except ValueError as e:
226 logger.exception(e)
227 raise ConnectorException()
228
229 return data
230
231
232 def get_image(url):
233 ''' wrapper for requesting an image '''
234 try:
235 resp = requests.get(
236 url,
237 headers={
238 'User-Agent': settings.USER_AGENT,
239 },
240 )
241 except (RequestError, SSLError):
242 return None
243 if not resp.ok:
244 return None
245 return resp
246
247
248 @dataclass
249 class SearchResult:
250 ''' standardized search result object '''
251 title: str
252 key: str
253 author: str
254 year: str
255 connector: object
256 confidence: int = 1
257
258 def __repr__(self):
259 return "<SearchResult key={!r} title={!r} author={!r}>".format(
260 self.key, self.title, self.author)
261
262 def json(self):
263 ''' serialize a connector for json response '''
264 serialized = asdict(self)
265 del serialized['connector']
266 return serialized
267
268
269 class Mapping:
270 ''' associate a local database field with a field in an external dataset '''
271 def __init__(self, local_field, remote_field=None, formatter=None):
272 noop = lambda x: x
273
274 self.local_field = local_field
275 self.remote_field = remote_field or local_field
276 self.formatter = formatter or noop
277
278 def get_value(self, data):
279 ''' pull a field from incoming json and return the formatted version '''
280 value = data.get(self.remote_field)
281 if not value:
282 return None
283 try:
284 return self.formatter(value)
285 except:# pylint: disable=bare-except
286 return None
287
[end of bookwyrm/connectors/abstract_connector.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bookwyrm/connectors/abstract_connector.py b/bookwyrm/connectors/abstract_connector.py
--- a/bookwyrm/connectors/abstract_connector.py
+++ b/bookwyrm/connectors/abstract_connector.py
@@ -145,8 +145,9 @@
edition.connector = self.connector
edition.save()
- work.default_edition = edition
- work.save()
+ if not work.default_edition:
+ work.default_edition = edition
+ work.save()
for author in self.get_authors_from_data(edition_data):
edition.authors.add(author)
| {"golden_diff": "diff --git a/bookwyrm/connectors/abstract_connector.py b/bookwyrm/connectors/abstract_connector.py\n--- a/bookwyrm/connectors/abstract_connector.py\n+++ b/bookwyrm/connectors/abstract_connector.py\n@@ -145,8 +145,9 @@\n edition.connector = self.connector\n edition.save()\n \n- work.default_edition = edition\n- work.save()\n+ if not work.default_edition:\n+ work.default_edition = edition\n+ work.save()\n \n for author in self.get_authors_from_data(edition_data):\n edition.authors.add(author)\n", "issue": "Expand book data task is re-setting default editions\nit should not.\n", "before_files": [{"content": "''' functionality outline for a book data connector '''\nfrom abc import ABC, abstractmethod\nfrom dataclasses import asdict, dataclass\nimport logging\nfrom urllib3.exceptions import RequestError\n\nfrom django.db import transaction\nimport requests\nfrom requests.exceptions import SSLError\n\nfrom bookwyrm import activitypub, models, settings\nfrom .connector_manager import load_more_data, ConnectorException\n\n\nlogger = logging.getLogger(__name__)\nclass AbstractMinimalConnector(ABC):\n ''' just the bare bones, for other bookwyrm instances '''\n def __init__(self, identifier):\n # load connector settings\n info = models.Connector.objects.get(identifier=identifier)\n self.connector = info\n\n # the things in the connector model to copy over\n self_fields = [\n 'base_url',\n 'books_url',\n 'covers_url',\n 'search_url',\n 'max_query_count',\n 'name',\n 'identifier',\n 'local'\n ]\n for field in self_fields:\n setattr(self, field, getattr(info, field))\n\n def search(self, query, min_confidence=None):\n ''' free text search '''\n params = {}\n if min_confidence:\n params['min_confidence'] = min_confidence\n\n resp = requests.get(\n '%s%s' % (self.search_url, query),\n params=params,\n headers={\n 'Accept': 'application/json; charset=utf-8',\n 'User-Agent': settings.USER_AGENT,\n },\n )\n if not resp.ok:\n resp.raise_for_status()\n try:\n data = resp.json()\n except ValueError as e:\n logger.exception(e)\n raise ConnectorException('Unable to parse json response', e)\n results = []\n\n for doc in self.parse_search_data(data)[:10]:\n results.append(self.format_search_result(doc))\n return results\n\n @abstractmethod\n def get_or_create_book(self, remote_id):\n ''' pull up a book record by whatever means possible '''\n\n @abstractmethod\n def parse_search_data(self, data):\n ''' turn the result json from a search into a list '''\n\n @abstractmethod\n def format_search_result(self, search_result):\n ''' create a SearchResult obj from json '''\n\n\nclass AbstractConnector(AbstractMinimalConnector):\n ''' generic book data connector '''\n def __init__(self, identifier):\n super().__init__(identifier)\n # fields we want to look for in book data to copy over\n # title we handle separately.\n self.book_mappings = []\n\n\n def is_available(self):\n ''' check if you're allowed to use this connector '''\n if self.max_query_count is not None:\n if self.connector.query_count >= self.max_query_count:\n return False\n return True\n\n\n def get_or_create_book(self, remote_id):\n ''' translate arbitrary json into an Activitypub dataclass '''\n # first, check if we have the origin_id saved\n existing = models.Edition.find_existing_by_remote_id(remote_id) or \\\n models.Work.find_existing_by_remote_id(remote_id)\n if existing:\n if hasattr(existing, 'get_default_editon'):\n return existing.get_default_editon()\n return existing\n\n # load the json\n data = get_data(remote_id)\n mapped_data 
= dict_from_mappings(data, self.book_mappings)\n if self.is_work_data(data):\n try:\n edition_data = self.get_edition_from_work_data(data)\n except (KeyError, ConnectorException):\n # hack: re-use the work data as the edition data\n # this is why remote ids aren't necessarily unique\n edition_data = data\n work_data = mapped_data\n else:\n try:\n work_data = self.get_work_from_edition_data(data)\n work_data = dict_from_mappings(work_data, self.book_mappings)\n except (KeyError, ConnectorException):\n work_data = mapped_data\n edition_data = data\n\n if not work_data or not edition_data:\n raise ConnectorException('Unable to load book data: %s' % remote_id)\n\n with transaction.atomic():\n # create activitypub object\n work_activity = activitypub.Work(**work_data)\n # this will dedupe automatically\n work = work_activity.to_model(models.Work)\n for author in self.get_authors_from_data(data):\n work.authors.add(author)\n\n edition = self.create_edition_from_data(work, edition_data)\n load_more_data.delay(self.connector.id, work.id)\n return edition\n\n\n def create_edition_from_data(self, work, edition_data):\n ''' if we already have the work, we're ready '''\n mapped_data = dict_from_mappings(edition_data, self.book_mappings)\n mapped_data['work'] = work.remote_id\n edition_activity = activitypub.Edition(**mapped_data)\n edition = edition_activity.to_model(models.Edition)\n edition.connector = self.connector\n edition.save()\n\n work.default_edition = edition\n work.save()\n\n for author in self.get_authors_from_data(edition_data):\n edition.authors.add(author)\n if not edition.authors.exists() and work.authors.exists():\n edition.authors.set(work.authors.all())\n\n return edition\n\n\n def get_or_create_author(self, remote_id):\n ''' load that author '''\n existing = models.Author.find_existing_by_remote_id(remote_id)\n if existing:\n return existing\n\n data = get_data(remote_id)\n\n mapped_data = dict_from_mappings(data, self.author_mappings)\n activity = activitypub.Author(**mapped_data)\n # this will dedupe\n return activity.to_model(models.Author)\n\n\n @abstractmethod\n def is_work_data(self, data):\n ''' differentiate works and editions '''\n\n @abstractmethod\n def get_edition_from_work_data(self, data):\n ''' every work needs at least one edition '''\n\n @abstractmethod\n def get_work_from_edition_data(self, data):\n ''' every edition needs a work '''\n\n @abstractmethod\n def get_authors_from_data(self, data):\n ''' load author data '''\n\n @abstractmethod\n def expand_book_data(self, book):\n ''' get more info on a book '''\n\n\ndef dict_from_mappings(data, mappings):\n ''' create a dict in Activitypub format, using mappings supplies by\n the subclass '''\n result = {}\n for mapping in mappings:\n result[mapping.local_field] = mapping.get_value(data)\n return result\n\n\ndef get_data(url):\n ''' wrapper for request.get '''\n try:\n resp = requests.get(\n url,\n headers={\n 'Accept': 'application/json; charset=utf-8',\n 'User-Agent': settings.USER_AGENT,\n },\n )\n except (RequestError, SSLError) as e:\n logger.exception(e)\n raise ConnectorException()\n\n if not resp.ok:\n try:\n resp.raise_for_status()\n except requests.exceptions.HTTPError as e:\n logger.exception(e)\n raise ConnectorException()\n try:\n data = resp.json()\n except ValueError as e:\n logger.exception(e)\n raise ConnectorException()\n\n return data\n\n\ndef get_image(url):\n ''' wrapper for requesting an image '''\n try:\n resp = requests.get(\n url,\n headers={\n 'User-Agent': settings.USER_AGENT,\n },\n )\n 
except (RequestError, SSLError):\n return None\n if not resp.ok:\n return None\n return resp\n\n\n@dataclass\nclass SearchResult:\n ''' standardized search result object '''\n title: str\n key: str\n author: str\n year: str\n connector: object\n confidence: int = 1\n\n def __repr__(self):\n return \"<SearchResult key={!r} title={!r} author={!r}>\".format(\n self.key, self.title, self.author)\n\n def json(self):\n ''' serialize a connector for json response '''\n serialized = asdict(self)\n del serialized['connector']\n return serialized\n\n\nclass Mapping:\n ''' associate a local database field with a field in an external dataset '''\n def __init__(self, local_field, remote_field=None, formatter=None):\n noop = lambda x: x\n\n self.local_field = local_field\n self.remote_field = remote_field or local_field\n self.formatter = formatter or noop\n\n def get_value(self, data):\n ''' pull a field from incoming json and return the formatted version '''\n value = data.get(self.remote_field)\n if not value:\n return None\n try:\n return self.formatter(value)\n except:# pylint: disable=bare-except\n return None\n", "path": "bookwyrm/connectors/abstract_connector.py"}]} | 3,143 | 133 |
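A regression check for the entry above would load a second edition for a work that already has a default edition and assert that the default survives. The sketch below assumes pytest-style fixtures (`connector`, `work_with_default`, `edition_data`) that mirror BookWyrm's existing connector tests; the fixture names are illustrative, not part of the project.

```python
def test_default_edition_not_reset(connector, work_with_default, edition_data):
    """Loading more editions must not replace an existing default edition."""
    original = work_with_default.default_edition
    connector.create_edition_from_data(work_with_default, edition_data)
    work_with_default.refresh_from_db()
    assert work_with_default.default_edition == original
```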
gh_patches_debug_608 | rasdani/github-patches | git_diff | pex-tool__pex-1482 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.51
On the docket:
+ [ ] UnicodeDecodeError when packaging after upgrading to v2.1.46 #1479
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.50"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.50"
+__version__ = "2.1.51"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.50\"\n+__version__ = \"2.1.51\"\n", "issue": "Release 2.1.51\nOn the docket:\r\n+ [ ] UnicodeDecodeError when packaging after upgrading to v2.1.46 #1479 \n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.50\"\n", "path": "pex/version.py"}]} | 621 | 96 |
gh_patches_debug_22877 | rasdani/github-patches | git_diff | kornia__kornia-2514 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix a Bug in `kornia.augmentation.RandomRain`
### Describe the bug
When applying `[kornia.augmentation.RandomRain](https://kornia.readthedocs.io/en/latest/augmentation.module.html#kornia.augmentation.RandomRain)` over an image, it seems that subsequent distortions are all affected by the rain effect. For example, if you apply `RandomRotation` before and after calling rain, the rotation before `RandomRain` is fine. However, the rotation after calling `RandomRain` is influenced by the rain. The same thing happens for all of the other distortions you may call after the `RandomRotation` call. I tried several other [augmentations](https://kornia.readthedocs.io/en/latest/augmentation.module.html#kornia.augmentation) here and this is the case for all of them.

### Reproduction steps
```bash
The following link is a minimal code as an example (on google colab).
```
[sample code](https://colab.research.google.com/drive/1m_Pjqa6-MgO2ybZeNnLYdDerrErYpKw_#scrollTo=dwr_iTFt28t5)

### Expected behavior
When applying the `kornia.augmentation.RandomRain` augmentation to an image, it should only affect the rain effect on that particular image and not introduce any unintended global effects on subsequent transformations. For example, if rotation is applied before and after calling `RandomRain`, the rotation after the rain effect should not be influenced by the rain.
### Environment
```shell
- Running on colab
- OS Linux
- Python version: 2.0.1+cu118
- GPU models and configuration:T4
```
### Additional context
_No response_
</issue>
<code>
[start of kornia/augmentation/_2d/intensity/random_rain.py]
1 from __future__ import annotations
2
3 from typing import Any
4
5 import torch
6
7 from kornia.augmentation._2d.intensity.base import IntensityAugmentationBase2D
8 from kornia.augmentation.random_generator._2d import RainGenerator
9 from kornia.core import Tensor
10 from kornia.core.check import KORNIA_CHECK
11
12
13 class RandomRain(IntensityAugmentationBase2D):
14 r"""Add Random Rain to the image.
15
16 Args:
17 p: probability of applying the transformation.
18 number_of_drops: number of drops per image
19 drop_height: height of the drop in image(same for each drops in one image)
20 drop_width: width of the drop in image(same for each drops in one image)
21 Shape:
22 - Input: :math:`(C, H, W)` or :math:`(B, C, H, W)`
23 - Output: :math:`(B, C, H, W)`
24
25 Examples:
26 >>> rng = torch.manual_seed(0)
27 >>> input = torch.rand(1, 1, 5, 5)
28 >>> rain = RandomRain(p=1,drop_height=(1,2),drop_width=(1,2),number_of_drops=(1,1))
29 >>> rain(input)
30 tensor([[[[0.4963, 0.7843, 0.0885, 0.1320, 0.3074],
31 [0.6341, 0.4901, 0.8964, 0.4556, 0.6323],
32 [0.3489, 0.4017, 0.0223, 0.1689, 0.2939],
33 [0.5185, 0.6977, 0.8000, 0.1610, 0.2823],
34 [0.6816, 0.9152, 0.3971, 0.8742, 0.4194]]]])
35 """
36
37 def __init__(
38 self,
39 same_on_batch: bool = False,
40 p: float = 0.5,
41 keepdim: bool = False,
42 number_of_drops: tuple[int, int] = (1000, 2000),
43 drop_height: tuple[int, int] = (5, 20),
44 drop_width: tuple[int, int] = (-5, 5),
45 ) -> None:
46 super().__init__(p=p, same_on_batch=same_on_batch, p_batch=1.0, keepdim=keepdim)
47 self._param_generator = RainGenerator(number_of_drops, drop_height, drop_width)
48
49 def apply_transform(
50 self, image: Tensor, params: dict[str, Tensor], flags: dict[str, Any], transform: Tensor | None = None
51 ) -> Tensor:
52 # Check array and drops size
53 KORNIA_CHECK(image.shape[1] in {3, 1}, "Number of color channels should be 1 or 3.")
54 KORNIA_CHECK(
55 bool(
56 torch.all(params['drop_height_factor'] <= image.shape[2])
57 and torch.all(params['drop_height_factor'] > 0)
58 ),
59 "Height of drop should be greater than zero and less than image height.",
60 )
61
62 KORNIA_CHECK(
63 bool(torch.all(torch.abs(params['drop_width_factor']) <= image.shape[3])),
64 "Width of drop should be less than image width.",
65 )
66
67 for i in range(image.shape[0]):
68 number_of_drops: int = int(params['number_of_drops_factor'][i])
69 # We generate tensor with maximum number of drops, and then remove unnecessary drops.
70
71 coordinates_of_drops: Tensor = params['coordinates_factor'][i][:number_of_drops]
72 height_of_drop: int = int(params['drop_height_factor'][i])
73 width_of_drop: int = int(params['drop_width_factor'][i])
74
75 # Generate start coordinates for each drop
76 random_y_coords = coordinates_of_drops[:, 0] * (image.shape[2] - height_of_drop - 1)
77 if width_of_drop > 0:
78 random_x_coords = coordinates_of_drops[:, 1] * (image.shape[3] - width_of_drop - 1)
79 else:
80 random_x_coords = coordinates_of_drops[:, 1] * (image.shape[3] + width_of_drop - 1) - width_of_drop
81
82 coords = torch.cat([random_y_coords[None], random_x_coords[None]], dim=0).to(image.device, dtype=torch.long)
83
84 # Generate how our drop will look like into the image
85 size_of_line: int = max(height_of_drop, abs(width_of_drop))
86 x = torch.linspace(start=0, end=height_of_drop, steps=size_of_line, dtype=torch.long).to(image.device)
87 y = torch.linspace(start=0, end=width_of_drop, steps=size_of_line, dtype=torch.long).to(image.device)
88 # Draw lines
89 for k in range(x.shape[0]):
90 image[i, :, coords[0] + x[k], coords[1] + y[k]] = 200 / 255
91 return image
92
[end of kornia/augmentation/_2d/intensity/random_rain.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kornia/augmentation/_2d/intensity/random_rain.py b/kornia/augmentation/_2d/intensity/random_rain.py
--- a/kornia/augmentation/_2d/intensity/random_rain.py
+++ b/kornia/augmentation/_2d/intensity/random_rain.py
@@ -63,7 +63,7 @@
bool(torch.all(torch.abs(params['drop_width_factor']) <= image.shape[3])),
"Width of drop should be less than image width.",
)
-
+ modeified_img = image.clone()
for i in range(image.shape[0]):
number_of_drops: int = int(params['number_of_drops_factor'][i])
# We generate tensor with maximum number of drops, and then remove unnecessary drops.
@@ -87,5 +87,5 @@
y = torch.linspace(start=0, end=width_of_drop, steps=size_of_line, dtype=torch.long).to(image.device)
# Draw lines
for k in range(x.shape[0]):
- image[i, :, coords[0] + x[k], coords[1] + y[k]] = 200 / 255
- return image
+ modeified_img[i, :, coords[0] + x[k], coords[1] + y[k]] = 200 / 255
+ return modeified_img
| {"golden_diff": "diff --git a/kornia/augmentation/_2d/intensity/random_rain.py b/kornia/augmentation/_2d/intensity/random_rain.py\n--- a/kornia/augmentation/_2d/intensity/random_rain.py\n+++ b/kornia/augmentation/_2d/intensity/random_rain.py\n@@ -63,7 +63,7 @@\n bool(torch.all(torch.abs(params['drop_width_factor']) <= image.shape[3])),\n \"Width of drop should be less than image width.\",\n )\n-\n+ modeified_img = image.clone()\n for i in range(image.shape[0]):\n number_of_drops: int = int(params['number_of_drops_factor'][i])\n # We generate tensor with maximum number of drops, and then remove unnecessary drops.\n@@ -87,5 +87,5 @@\n y = torch.linspace(start=0, end=width_of_drop, steps=size_of_line, dtype=torch.long).to(image.device)\n # Draw lines\n for k in range(x.shape[0]):\n- image[i, :, coords[0] + x[k], coords[1] + y[k]] = 200 / 255\n- return image\n+ modeified_img[i, :, coords[0] + x[k], coords[1] + y[k]] = 200 / 255\n+ return modeified_img\n", "issue": "Fix a Bug in `kornia.augmentation.RandomRain`\n### Describe the bug\r\n\r\n\r\nWhen applying `[kornia.augmentation.RandomRain](https://kornia.readthedocs.io/en/latest/augmentation.module.html#kornia.augmentation.RandomRain)` over an image, it seems that subsequent distortions are all affected by the rain effect. For example, if you apply `RandomRotation` before and after calling rain, the rotation before `RandomRain` is fine. However, the rotation after calling `RandomRain` is influenced by the rain. same thing happens for all of the other distortion you may call after `RandomRotation` function. I tried several other [augmentation ](https://kornia.readthedocs.io/en/latest/augmentation.module.html#kornia.augmentation)here and this is the case for all of them. \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n### Reproduction steps\r\n\r\n```bash\r\nThe following link is a minimal code as an example (on google colab).\r\n```\r\n[sample code](https://colab.research.google.com/drive/1m_Pjqa6-MgO2ybZeNnLYdDerrErYpKw_#scrollTo=dwr_iTFt28t5)\r\n\r\n\r\n\r\n### Expected behavior\r\n\r\nWhen applying the `kornia.augmentation.RandomRain` augmentation to an image, it should only affect the rain effect on that particular image and not introduce any unintended global effects on subsequent transformations. 
For example, if rotation is applied before and after calling `RandomRain`, the rotation after the rain effect should not be influenced by the rain.\r\n\r\n\r\n### Environment\r\n\r\n```shell\r\n- Running on colab\r\n- OS Linux\r\n- Python version: 2.0.1+cu118\r\n- GPU models and configuration:T4\r\n```\r\n\r\n\r\n### Additional context\r\n\r\n_No response_\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nimport torch\n\nfrom kornia.augmentation._2d.intensity.base import IntensityAugmentationBase2D\nfrom kornia.augmentation.random_generator._2d import RainGenerator\nfrom kornia.core import Tensor\nfrom kornia.core.check import KORNIA_CHECK\n\n\nclass RandomRain(IntensityAugmentationBase2D):\n r\"\"\"Add Random Rain to the image.\n\n Args:\n p: probability of applying the transformation.\n number_of_drops: number of drops per image\n drop_height: height of the drop in image(same for each drops in one image)\n drop_width: width of the drop in image(same for each drops in one image)\n Shape:\n - Input: :math:`(C, H, W)` or :math:`(B, C, H, W)`\n - Output: :math:`(B, C, H, W)`\n\n Examples:\n >>> rng = torch.manual_seed(0)\n >>> input = torch.rand(1, 1, 5, 5)\n >>> rain = RandomRain(p=1,drop_height=(1,2),drop_width=(1,2),number_of_drops=(1,1))\n >>> rain(input)\n tensor([[[[0.4963, 0.7843, 0.0885, 0.1320, 0.3074],\n [0.6341, 0.4901, 0.8964, 0.4556, 0.6323],\n [0.3489, 0.4017, 0.0223, 0.1689, 0.2939],\n [0.5185, 0.6977, 0.8000, 0.1610, 0.2823],\n [0.6816, 0.9152, 0.3971, 0.8742, 0.4194]]]])\n \"\"\"\n\n def __init__(\n self,\n same_on_batch: bool = False,\n p: float = 0.5,\n keepdim: bool = False,\n number_of_drops: tuple[int, int] = (1000, 2000),\n drop_height: tuple[int, int] = (5, 20),\n drop_width: tuple[int, int] = (-5, 5),\n ) -> None:\n super().__init__(p=p, same_on_batch=same_on_batch, p_batch=1.0, keepdim=keepdim)\n self._param_generator = RainGenerator(number_of_drops, drop_height, drop_width)\n\n def apply_transform(\n self, image: Tensor, params: dict[str, Tensor], flags: dict[str, Any], transform: Tensor | None = None\n ) -> Tensor:\n # Check array and drops size\n KORNIA_CHECK(image.shape[1] in {3, 1}, \"Number of color channels should be 1 or 3.\")\n KORNIA_CHECK(\n bool(\n torch.all(params['drop_height_factor'] <= image.shape[2])\n and torch.all(params['drop_height_factor'] > 0)\n ),\n \"Height of drop should be greater than zero and less than image height.\",\n )\n\n KORNIA_CHECK(\n bool(torch.all(torch.abs(params['drop_width_factor']) <= image.shape[3])),\n \"Width of drop should be less than image width.\",\n )\n\n for i in range(image.shape[0]):\n number_of_drops: int = int(params['number_of_drops_factor'][i])\n # We generate tensor with maximum number of drops, and then remove unnecessary drops.\n\n coordinates_of_drops: Tensor = params['coordinates_factor'][i][:number_of_drops]\n height_of_drop: int = int(params['drop_height_factor'][i])\n width_of_drop: int = int(params['drop_width_factor'][i])\n\n # Generate start coordinates for each drop\n random_y_coords = coordinates_of_drops[:, 0] * (image.shape[2] - height_of_drop - 1)\n if width_of_drop > 0:\n random_x_coords = coordinates_of_drops[:, 1] * (image.shape[3] - width_of_drop - 1)\n else:\n random_x_coords = coordinates_of_drops[:, 1] * (image.shape[3] + width_of_drop - 1) - width_of_drop\n\n coords = torch.cat([random_y_coords[None], random_x_coords[None]], dim=0).to(image.device, dtype=torch.long)\n\n # Generate how our drop will look like into the image\n size_of_line: 
int = max(height_of_drop, abs(width_of_drop))\n x = torch.linspace(start=0, end=height_of_drop, steps=size_of_line, dtype=torch.long).to(image.device)\n y = torch.linspace(start=0, end=width_of_drop, steps=size_of_line, dtype=torch.long).to(image.device)\n # Draw lines\n for k in range(x.shape[0]):\n image[i, :, coords[0] + x[k], coords[1] + y[k]] = 200 / 255\n return image\n", "path": "kornia/augmentation/_2d/intensity/random_rain.py"}]} | 2,422 | 307 |
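A quick way to reproduce (or, with the patch above, rule out) the in-place mutation is to compare the input tensor before and after calling the augmentation. This sketch uses the constructor arguments documented in the class above; the image size and drop counts are arbitrary:

```python
import torch
import kornia.augmentation as K


def rain_mutates_input() -> bool:
    torch.manual_seed(0)
    img = torch.rand(1, 3, 32, 32)
    before = img.clone()
    K.RandomRain(p=1.0, number_of_drops=(50, 50), drop_height=(1, 2), drop_width=(1, 2))(img)
    # True reproduces the bug; False is the expected behaviour after the fix.
    return not torch.equal(img, before)
```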
gh_patches_debug_3021 | rasdani/github-patches | git_diff | mindsdb__mindsdb-1576 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add new method to count number of rows for MySQL datasources :electric_plug: :1234:
When MindsDB creates a new MySQL datasource we get information for row counts by fetching all datasources. The problem here is that if the datasource is big, it takes a lot of time. We need a new `get_row_count` method to return the number of rows per datasource. The PR should include this method inside the MySQL class.
## Steps :male_detective: :female_detective:
- Implement in https://github.com/mindsdb/mindsdb/blob/stable/mindsdb/integrations/mysql/mysql.py#L51
- Example method:
```py
def get_row_count(self, query):
result = conn.execute(query)
return len(query)
```
- Push to staging branch
## Additional rewards :1st_place_medal:
Each code PR brings :three: points for entry into the draw for a :computer: Deep Learning Laptop powered by the NVIDIA RTX 3080 Max-Q GPU or other swag :shirt: :bear:. For more info check out https://mindsdb.com/hacktoberfest/
</issue>
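The example method in the issue is only an outline (`conn` is never defined and `len(query)` measures the query string, not the result set). The implementation that was merged (golden diff at the end of this entry) wraps the datasource query in a `COUNT(*)` subquery and reuses the integration's `_query` helper; a sketch of that method, intended for the `MySQL` class shown below:

```python
def get_row_count(self, query):
    # Count rows on the server instead of fetching the whole result set.
    q = f"""
        SELECT COUNT(*) AS count
        FROM ({query}) AS query;
    """
    result = self._query(q)
    return result[0]['count']
```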
<code>
[start of mindsdb/integrations/mysql/mysql.py]
1 import os
2 import shutil
3 import tempfile
4
5 from contextlib import closing
6 import mysql.connector
7
8 from lightwood.api import dtype
9 from mindsdb.integrations.base import Integration
10 from mindsdb.utilities.log import log
11
12
13 class MySQLConnectionChecker:
14 def __init__(self, **kwargs):
15 self.host = kwargs.get('host')
16 self.port = kwargs.get('port')
17 self.user = kwargs.get('user')
18 self.password = kwargs.get('password')
19 self.ssl = kwargs.get('ssl')
20 self.ssl_ca = kwargs.get('ssl_ca')
21 self.ssl_cert = kwargs.get('ssl_cert')
22 self.ssl_key = kwargs.get('ssl_key')
23
24 def _get_connnection(self):
25 config = {
26 "host": self.host,
27 "port": self.port,
28 "user": self.user,
29 "password": self.password
30 }
31 if self.ssl is True:
32 config['client_flags'] = [mysql.connector.constants.ClientFlag.SSL]
33 if self.ssl_ca is not None:
34 config["ssl_ca"] = self.ssl_ca
35 if self.ssl_cert is not None:
36 config["ssl_cert"] = self.ssl_cert
37 if self.ssl_key is not None:
38 config["ssl_key"] = self.ssl_key
39 return mysql.connector.connect(**config)
40
41 def check_connection(self):
42 try:
43 con = self._get_connnection()
44 with closing(con) as con:
45 connected = con.is_connected()
46 except Exception:
47 connected = False
48 return connected
49
50
51 class MySQL(Integration, MySQLConnectionChecker):
52 def __init__(self, config, name, db_info):
53 super().__init__(config, name)
54 self.user = db_info.get('user')
55 self.password = db_info.get('password')
56 self.host = db_info.get('host')
57 self.port = db_info.get('port')
58 self.ssl = db_info.get('ssl')
59 self.ssl_ca = db_info.get('ssl_ca')
60 self.ssl_cert = db_info.get('ssl_cert')
61 self.ssl_key = db_info.get('ssl_key')
62
63 def _to_mysql_table(self, dtype_dict, predicted_cols, columns):
64 subtype_map = {
65 dtype.integer: 'int',
66 dtype.float: 'double',
67 dtype.binary: 'bool',
68 dtype.date: 'Date',
69 dtype.datetime: 'Datetime',
70 dtype.binary: 'VARCHAR(500)',
71 dtype.categorical: 'VARCHAR(500)',
72 dtype.tags: 'VARCHAR(500)',
73 dtype.image: 'VARCHAR(500)',
74 dtype.video: 'VARCHAR(500)',
75 dtype.audio: 'VARCHAR(500)',
76 dtype.short_text: 'VARCHAR(500)',
77 dtype.rich_text: 'VARCHAR(500)',
78 dtype.array: 'VARCHAR(500)'
79 }
80
81 column_declaration = []
82 for name in columns:
83 try:
84 col_subtype = dtype_dict[name]
85 new_type = subtype_map[col_subtype]
86 column_declaration.append(f' `{name}` {new_type} ')
87 if name in predicted_cols:
88 column_declaration.append(f' `{name}_original` {new_type} ')
89 except Exception as e:
90 log.error(f'Error: can not determine mysql data type for column {name}: {e}')
91
92 return column_declaration
93
94 def _escape_table_name(self, name):
95 return '`' + name.replace('`', '``') + '`'
96
97 def _query(self, query):
98 con = self._get_connnection()
99 with closing(con) as con:
100 cur = con.cursor(dictionary=True, buffered=True)
101 cur.execute(query)
102 res = True
103 try:
104 res = cur.fetchall()
105 except Exception:
106 pass
107 con.commit()
108
109 return res
110
111 def _get_connect_string(self, table):
112 user = f"{self.config['api']['mysql']['user']}_{self.name}"
113 password = self.config['api']['mysql']['password']
114 host = self.config['api']['mysql']['host']
115 port = self.config['api']['mysql']['port']
116
117 if password is None or password == '':
118 connect = f'mysql://{user}@{host}:{port}/mindsdb/{table}'
119 else:
120 connect = f'mysql://{user}:{password}@{host}:{port}/mindsdb/{table}'
121
122 return connect
123
124 def setup(self):
125 self._query(f'DROP DATABASE IF EXISTS {self.mindsdb_database}')
126 self._query(f'CREATE DATABASE IF NOT EXISTS {self.mindsdb_database}')
127
128 connect = self._get_connect_string('predictors')
129
130 q = f"""
131 CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.predictors (
132 name VARCHAR(500),
133 status VARCHAR(500),
134 accuracy VARCHAR(500),
135 predict VARCHAR(500),
136 select_data_query VARCHAR(500),
137 external_datasource VARCHAR(500),
138 training_options VARCHAR(500),
139 key name_key (name)
140 ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';
141 """
142 self._query(q)
143
144 connect = self._get_connect_string('commands')
145
146 q = f"""
147 CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.commands (
148 command VARCHAR(500),
149 key command_key (command)
150 ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';
151 """
152 self._query(q)
153
154 def register_predictors(self, model_data_arr):
155 for model_meta in model_data_arr:
156 name = model_meta['name']
157 predict = model_meta['predict']
158 if not isinstance(predict, list):
159 predict = [predict]
160 columns_sql = ','.join(self._to_mysql_table(
161 model_meta['dtype_dict'],
162 predict,
163 list(model_meta['dtype_dict'].keys())
164 ))
165 columns_sql += ',`when_data` varchar(500)'
166 columns_sql += ',`select_data_query` varchar(500)'
167 columns_sql += ',`external_datasource` varchar(500)'
168 for col in predict:
169 columns_sql += f',`{col}_confidence` double'
170 if model_meta['dtype_dict'][col] in (dtype.integer, dtype.float):
171 columns_sql += f',`{col}_min` double'
172 columns_sql += f',`{col}_max` double'
173 columns_sql += f',`{col}_explain` varchar(500)'
174
175 connect = self._get_connect_string(name)
176
177 self.unregister_predictor(name)
178 q = f"""
179 CREATE TABLE {self.mindsdb_database}.{self._escape_table_name(name)} (
180 {columns_sql},
181 index when_data_index (when_data),
182 index select_data_query_index (select_data_query),
183 index external_datasource_index (external_datasource)
184 ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';
185 """
186 self._query(q)
187
188 def unregister_predictor(self, name):
189 q = f"""
190 drop table if exists {self.mindsdb_database}.{self._escape_table_name(name)};
191 """
192 self._query(q)
193
[end of mindsdb/integrations/mysql/mysql.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mindsdb/integrations/mysql/mysql.py b/mindsdb/integrations/mysql/mysql.py
--- a/mindsdb/integrations/mysql/mysql.py
+++ b/mindsdb/integrations/mysql/mysql.py
@@ -190,3 +190,10 @@
drop table if exists {self.mindsdb_database}.{self._escape_table_name(name)};
"""
self._query(q)
+
+ def get_row_count(self, query):
+ q = f"""
+ SELECT COUNT(*) as count
+ FROM ({query}) as query;"""
+ result = self._query(q)
+ return result[0]['count']
| {"golden_diff": "diff --git a/mindsdb/integrations/mysql/mysql.py b/mindsdb/integrations/mysql/mysql.py\n--- a/mindsdb/integrations/mysql/mysql.py\n+++ b/mindsdb/integrations/mysql/mysql.py\n@@ -190,3 +190,10 @@\n drop table if exists {self.mindsdb_database}.{self._escape_table_name(name)};\n \"\"\"\n self._query(q)\n+\n+ def get_row_count(self, query):\n+ q = f\"\"\" \n+ SELECT COUNT(*) as count\n+ FROM ({query}) as query;\"\"\"\n+ result = self._query(q)\n+ return result[0]['count']\n", "issue": "Add new method to count number of rows for MySQL datasources :electric_plug: :1234: \nWhen MindsDB creates a new MySQL datasource we get information for row counts by fetching all datasources. The problem here is that if datasource is big it takes a lot of time. We need a new get_row_count method to return the number of rows per datasource. The PR should include this method inside the PostgreSQL class .\r\n\r\n## Steps :male_detective: :female_detective: \r\n\r\n- Implement in https://github.com/mindsdb/mindsdb/blob/stable/mindsdb/integrations/mysql/mysql.py#L51\r\n- Example method:\r\n```py\r\ndef get_row_count(self, query):\r\n result = conn.execute(query)\r\n return len(query)\r\n```\r\n- Push to staging branch\r\n\r\n## Additional rewards :1st_place_medal: \r\n\r\nEach code PR brings :three: point for entry into the draw for a :computer: Deep Learning Laptop powered by the NVIDIA RTX 3080 Max-Q GPU or other swag :shirt: :bear: . For more info check out https://mindsdb.com/hacktoberfest/\r\n \r\n\r\n\n", "before_files": [{"content": "import os\nimport shutil\nimport tempfile\n\nfrom contextlib import closing\nimport mysql.connector\n\nfrom lightwood.api import dtype\nfrom mindsdb.integrations.base import Integration\nfrom mindsdb.utilities.log import log\n\n\nclass MySQLConnectionChecker:\n def __init__(self, **kwargs):\n self.host = kwargs.get('host')\n self.port = kwargs.get('port')\n self.user = kwargs.get('user')\n self.password = kwargs.get('password')\n self.ssl = kwargs.get('ssl')\n self.ssl_ca = kwargs.get('ssl_ca')\n self.ssl_cert = kwargs.get('ssl_cert')\n self.ssl_key = kwargs.get('ssl_key')\n\n def _get_connnection(self):\n config = {\n \"host\": self.host,\n \"port\": self.port,\n \"user\": self.user,\n \"password\": self.password\n }\n if self.ssl is True:\n config['client_flags'] = [mysql.connector.constants.ClientFlag.SSL]\n if self.ssl_ca is not None:\n config[\"ssl_ca\"] = self.ssl_ca\n if self.ssl_cert is not None:\n config[\"ssl_cert\"] = self.ssl_cert\n if self.ssl_key is not None:\n config[\"ssl_key\"] = self.ssl_key\n return mysql.connector.connect(**config)\n\n def check_connection(self):\n try:\n con = self._get_connnection()\n with closing(con) as con:\n connected = con.is_connected()\n except Exception:\n connected = False\n return connected\n\n\nclass MySQL(Integration, MySQLConnectionChecker):\n def __init__(self, config, name, db_info):\n super().__init__(config, name)\n self.user = db_info.get('user')\n self.password = db_info.get('password')\n self.host = db_info.get('host')\n self.port = db_info.get('port')\n self.ssl = db_info.get('ssl')\n self.ssl_ca = db_info.get('ssl_ca')\n self.ssl_cert = db_info.get('ssl_cert')\n self.ssl_key = db_info.get('ssl_key')\n\n def _to_mysql_table(self, dtype_dict, predicted_cols, columns):\n subtype_map = {\n dtype.integer: 'int',\n dtype.float: 'double',\n dtype.binary: 'bool',\n dtype.date: 'Date',\n dtype.datetime: 'Datetime',\n dtype.binary: 'VARCHAR(500)',\n dtype.categorical: 'VARCHAR(500)',\n dtype.tags: 
'VARCHAR(500)',\n dtype.image: 'VARCHAR(500)',\n dtype.video: 'VARCHAR(500)',\n dtype.audio: 'VARCHAR(500)',\n dtype.short_text: 'VARCHAR(500)',\n dtype.rich_text: 'VARCHAR(500)',\n dtype.array: 'VARCHAR(500)'\n }\n\n column_declaration = []\n for name in columns:\n try:\n col_subtype = dtype_dict[name]\n new_type = subtype_map[col_subtype]\n column_declaration.append(f' `{name}` {new_type} ')\n if name in predicted_cols:\n column_declaration.append(f' `{name}_original` {new_type} ')\n except Exception as e:\n log.error(f'Error: can not determine mysql data type for column {name}: {e}')\n\n return column_declaration\n\n def _escape_table_name(self, name):\n return '`' + name.replace('`', '``') + '`'\n\n def _query(self, query):\n con = self._get_connnection()\n with closing(con) as con:\n cur = con.cursor(dictionary=True, buffered=True)\n cur.execute(query)\n res = True\n try:\n res = cur.fetchall()\n except Exception:\n pass\n con.commit()\n\n return res\n\n def _get_connect_string(self, table):\n user = f\"{self.config['api']['mysql']['user']}_{self.name}\"\n password = self.config['api']['mysql']['password']\n host = self.config['api']['mysql']['host']\n port = self.config['api']['mysql']['port']\n\n if password is None or password == '':\n connect = f'mysql://{user}@{host}:{port}/mindsdb/{table}'\n else:\n connect = f'mysql://{user}:{password}@{host}:{port}/mindsdb/{table}'\n\n return connect\n\n def setup(self):\n self._query(f'DROP DATABASE IF EXISTS {self.mindsdb_database}')\n self._query(f'CREATE DATABASE IF NOT EXISTS {self.mindsdb_database}')\n\n connect = self._get_connect_string('predictors')\n\n q = f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.predictors (\n name VARCHAR(500),\n status VARCHAR(500),\n accuracy VARCHAR(500),\n predict VARCHAR(500),\n select_data_query VARCHAR(500),\n external_datasource VARCHAR(500),\n training_options VARCHAR(500),\n key name_key (name)\n ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';\n \"\"\"\n self._query(q)\n\n connect = self._get_connect_string('commands')\n\n q = f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.commands (\n command VARCHAR(500),\n key command_key (command)\n ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';\n \"\"\"\n self._query(q)\n\n def register_predictors(self, model_data_arr):\n for model_meta in model_data_arr:\n name = model_meta['name']\n predict = model_meta['predict']\n if not isinstance(predict, list):\n predict = [predict]\n columns_sql = ','.join(self._to_mysql_table(\n model_meta['dtype_dict'],\n predict,\n list(model_meta['dtype_dict'].keys())\n ))\n columns_sql += ',`when_data` varchar(500)'\n columns_sql += ',`select_data_query` varchar(500)'\n columns_sql += ',`external_datasource` varchar(500)'\n for col in predict:\n columns_sql += f',`{col}_confidence` double'\n if model_meta['dtype_dict'][col] in (dtype.integer, dtype.float):\n columns_sql += f',`{col}_min` double'\n columns_sql += f',`{col}_max` double'\n columns_sql += f',`{col}_explain` varchar(500)'\n\n connect = self._get_connect_string(name)\n\n self.unregister_predictor(name)\n q = f\"\"\"\n CREATE TABLE {self.mindsdb_database}.{self._escape_table_name(name)} (\n {columns_sql},\n index when_data_index (when_data),\n index select_data_query_index (select_data_query),\n index external_datasource_index (external_datasource)\n ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';\n \"\"\"\n self._query(q)\n\n def unregister_predictor(self, name):\n q = f\"\"\"\n drop table if exists 
{self.mindsdb_database}.{self._escape_table_name(name)};\n \"\"\"\n self._query(q)\n", "path": "mindsdb/integrations/mysql/mysql.py"}]} | 2,783 | 144 |
gh_patches_debug_8859 | rasdani/github-patches | git_diff | angr__angr-2453 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error being raised while exploring ls
This error is being thrown after a simple exploration:
```
Traceback (most recent call last):
File "try.py", line 5, in <module>
sm.explore()
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/sim_manager.py", line 239, in explore
self.run(stash=stash, n=n, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/sim_manager.py", line 261, in run
self.step(stash=stash, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/misc/hookset.py", line 75, in __call__
result = current_hook(self.func.__self__, *args, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/exploration_techniques/explorer.py", line 96, in step
return simgr.step(stash=stash, extra_stop_points=base_extra_stop_points | self._extra_stop_points, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/misc/hookset.py", line 80, in __call__
return self.func(*args, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/sim_manager.py", line 346, in step
successors = self.step_state(state, successor_func=successor_func, **run_args)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/sim_manager.py", line 383, in step_state
successors = self.successors(state, successor_func=successor_func, **run_args)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/sim_manager.py", line 422, in successors
return self._project.factory.successors(state, **run_args)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/factory.py", line 60, in successors
return self.default_engine.process(*args, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/engines/vex/light/slicing.py", line 19, in process
return super().process(*args, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/engines/engine.py", line 149, in process
self.process_successors(self.successors, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/engines/failure.py", line 21, in process_successors
return super().process_successors(successors, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/engines/syscall.py", line 38, in process_successors
return self.process_procedure(state, successors, sys_procedure, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/engines/procedure.py", line 37, in process_procedure
inst = procedure.execute(state, successors, ret_to=ret_to, arguments=arguments)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/sim_procedure.py", line 230, in execute
r = getattr(inst, inst.run_func)(*sim_args, **inst.kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/procedures/linux_kernel/iovec.py", line 21, in run
if self.state.solver.is_true(self.state.solver.SLT(tmpres, 0)):
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/state_plugins/solver.py", line 126, in concrete_shortcut_bool
return f(self, *args, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/state_plugins/sim_action_object.py", line 57, in ast_stripper
return f(*new_args, **new_kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/state_plugins/solver.py", line 89, in wrapped_f
return f(*args, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/angr/state_plugins/solver.py", line 585, in is_true
return self._solver.is_true(e, extra_constraints=self._adjust_constraint_list(extra_constraints), exact=exact)
File "/home/berlinm/.local/lib/python3.7/site-packages/claripy/frontend_mixins/concrete_handler_mixin.py", line 53, in is_true
return super(ConcreteHandlerMixin, self).is_true(e, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/claripy/frontend_mixins/constraint_filter_mixin.py", line 60, in is_true
return super(ConstraintFilterMixin, self).is_true(e, extra_constraints=ec, **kwargs)
File "/home/berlinm/.local/lib/python3.7/site-packages/claripy/frontends/composite_frontend.py", line 349, in is_true
r = ms.is_true(e, extra_constraints=extra_constraints, exact=exact)
File "/home/berlinm/.local/lib/python3.7/site-packages/claripy/frontends/full_frontend.py", line 184, in is_true
return e.is_true()
AttributeError: 'NotImplementedType' object has no attribute 'is_true'
```
**Environment Information.**
```
angr environment report
=============================
Date: 2020-12-30 12:39:53.872698
Running in virtual environment at /home/berlinm/.virtualenvs/angr
Platform: linux-x86_64
Python version: 3.7.9 (7e6e2bb30ac5, Nov 18 2020, 10:55:52)
[PyPy 7.3.3-beta0 with GCC 7.3.1 20180303 (Red Hat 7.3.1-5)]
######## angr #########
Python found it in /home/berlinm/.local/lib/python3.7/site-packages/angr
Pip version angr 9.0.5034
Couldn't find git info
######## ailment #########
Python found it in /home/berlinm/.local/lib/python3.7/site-packages/ailment
Pip version ailment 9.0.5034
Couldn't find git info
######## cle #########
Python found it in /home/berlinm/.local/lib/python3.7/site-packages/cle
Pip version cle 9.0.5034
Couldn't find git info
######## pyvex #########
Python found it in /home/berlinm/.local/lib/python3.7/site-packages/pyvex
Pip version pyvex 9.0.5034
Couldn't find git info
######## claripy #########
Python found it in /home/berlinm/.local/lib/python3.7/site-packages/claripy
Pip version claripy 9.0.5034
Couldn't find git info
######## archinfo #########
Python found it in /home/berlinm/.local/lib/python3.7/site-packages/archinfo
Pip version archinfo 9.0.5034
Couldn't find git info
######## z3 #########
Python found it in /home/berlinm/.local/lib/python3.7/site-packages/z3
Pip version z3-solver 4.8.9.0
Couldn't find git info
######## unicorn #########
Python found it in /home/berlinm/.local/lib/python3.7/site-packages/unicorn
Pip version unicorn 1.0.2rc4
Couldn't find git info
######### Native Module Info ##########
angr: <CDLL '/home/berlinm/.local/lib/python3.7/site-packages/angr/lib/angr_native.so', handle 61bfe90 at 0x638ebb8>
unicorn: <CDLL '/home/berlinm/.local/lib/python3.7/site-packages/unicorn/lib/libunicorn.so', handle 2393fd0 at 0x7fcacfae5da8>
pyvex: <cffi.api._make_ffi_library.<locals>.FFILibrary object at 0x00007fcad0b9b398>
z3: <CDLL '/home/berlinm/.local/lib/python3.7/site-packages/z3/lib/libz3.so', handle 2fc8df0 at 0x3206758>
```
**To Reproduce.**

```python
import angr

project = angr.Project('ls')

sm = project.factory.simulation_manager(project.factory.call_state(0x404a70))  # Some function's address
sm.explore()
```

The binary (inside the zip): [ls.zip](https://github.com/angr/angr/files/5753958/ls.zip)
</issue>
<code>
[start of angr/procedures/linux_kernel/iovec.py]
1 import angr
2 from ..posix.read import read
3 from ..posix.write import write
4 from ...sim_type import register_types, parse_types
5
6 register_types(parse_types("""
7 struct iovec {
8 void *iov_base; /* Starting address */
9 size_t iov_len; /* Number of bytes to transfer */
10 };
11 """))
12
13 class readv(angr.SimProcedure):
14 def run(self, fd, iovec, iovcnt):
15 if iovec.symbolic or iovcnt.symbolic:
16 raise angr.errors.SimPosixError("Can't handle symbolic arguments to readv")
17 iovcnt = self.state.solver.eval(iovcnt)
18 res = 0
19 for element in self.state.mem[iovec].struct.iovec.array(iovcnt).resolved:
20 tmpres = self.inline_call(read, fd, element.iov_base, element.iov_len)
21 if self.state.solver.is_true(self.state.solver.SLT(tmpres, 0)):
22 return tmpres
23
24 return res
25
26
27 class writev(angr.SimProcedure):
28 def run(self, fd, iovec, iovcnt):
29 if iovec.symbolic or iovcnt.symbolic:
30 raise angr.errors.SimPosixError("Can't handle symbolic arguments to writev")
31 iovcnt = self.state.solver.eval(iovcnt)
32 res = 0
33 for element in self.state.mem[iovec].struct.iovec.array(iovcnt).resolved:
34 tmpres = self.inline_call(write, fd, element.iov_base, element.iov_len).ret_expr
35 if self.state.solver.is_true(self.state.solver.SLT(tmpres, 0)):
36 return tmpres
37
38 return res
39
[end of angr/procedures/linux_kernel/iovec.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/angr/procedures/linux_kernel/iovec.py b/angr/procedures/linux_kernel/iovec.py
--- a/angr/procedures/linux_kernel/iovec.py
+++ b/angr/procedures/linux_kernel/iovec.py
@@ -17,7 +17,7 @@
iovcnt = self.state.solver.eval(iovcnt)
res = 0
for element in self.state.mem[iovec].struct.iovec.array(iovcnt).resolved:
- tmpres = self.inline_call(read, fd, element.iov_base, element.iov_len)
+ tmpres = self.inline_call(read, fd, element.iov_base, element.iov_len).ret_expr
if self.state.solver.is_true(self.state.solver.SLT(tmpres, 0)):
return tmpres
| {"golden_diff": "diff --git a/angr/procedures/linux_kernel/iovec.py b/angr/procedures/linux_kernel/iovec.py\n--- a/angr/procedures/linux_kernel/iovec.py\n+++ b/angr/procedures/linux_kernel/iovec.py\n@@ -17,7 +17,7 @@\n iovcnt = self.state.solver.eval(iovcnt)\n res = 0\n for element in self.state.mem[iovec].struct.iovec.array(iovcnt).resolved:\n- tmpres = self.inline_call(read, fd, element.iov_base, element.iov_len)\n+ tmpres = self.inline_call(read, fd, element.iov_base, element.iov_len).ret_expr\n if self.state.solver.is_true(self.state.solver.SLT(tmpres, 0)):\n return tmpres\n", "issue": "Error being raised while exploring ls\nThis error is being thrown after a simple exploration:\r\n\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"try.py\", line 5, in <module>\r\n sm.explore()\r\n File \"/home/berlinm/.local/lib/python3.7/site-packages/angr/sim_manager.py\", line 239, in explore\r\n self.run(stash=stash, n=n, **kwargs)\r\n File \"/home/berlinm/.local/lib/python3.7/site-packages/angr/sim_manager.py\", line 261, in run\r\n self.step(stash=stash, **kwargs)\r\n File \"/home/berlinm/.local/lib/python3.7/site-packages/angr/misc/hookset.py\", line 75, in __call__\r\n result = current_hook(self.func.__self__, *args, **kwargs)\r\n File \"/home/berlinm/.local/lib/python3.7/site-packages/angr/exploration_techniques/explorer.py\", line 96, in step\r\n return simgr.step(stash=stash, extra_stop_points=base_extra_stop_points | self._extra_stop_points, **kwargs)\r\n File \"/home/berlinm/.local/lib/python3.7/site-packages/angr/misc/hookset.py\", line 80, in __call__\r\n return self.func(*args, **kwargs)\r\n File \"/home/berlinm/.local/lib/python3.7/site-packages/angr/sim_manager.py\", line 346, in step\r\n successors = self.step_state(state, successor_func=successor_func, **run_args)\r\n File \"/home/berlinm/.local/lib/python3.7/site-packages/angr/sim_manager.py\", line 383, in step_state\r\n successors = self.successors(state, successor_func=successor_func, **run_args)\r\n File \"/home/berlinm/.local/lib/python3.7/site-packages/angr/sim_manager.py\", line 422, in successors\r\n return self._project.factory.successors(state, **run_args)\r\n File \"/home/berlinm/.local/lib/python3.7/site-packages/angr/factory.py\", line 60, in successors\r\n return self.default_engine.process(*args, **kwargs)\r\n File \"/home/berlinm/.local/lib/python3.7/site-packages/angr/engines/vex/light/slicing.py\", line 19, in process\r\n return super().process(*args, **kwargs)\r\n File \"/home/berlinm/.local/lib/python3.7/site-packages/angr/engines/engine.py\", line 149, in process\r\n self.process_successors(self.successors, **kwargs)\r\n File \"/home/berlinm/.local/lib/python3.7/site-packages/angr/engines/failure.py\", line 21, in process_successors\r\n return super().process_successors(successors, **kwargs)\r\n File \"/home/berlinm/.local/lib/python3.7/site-packages/angr/engines/syscall.py\", line 38, in process_successors\r\n return self.process_procedure(state, successors, sys_procedure, **kwargs)\r\n File \"/home/berlinm/.local/lib/python3.7/site-packages/angr/engines/procedure.py\", line 37, in process_procedure\r\n inst = procedure.execute(state, successors, ret_to=ret_to, arguments=arguments)\r\n File \"/home/berlinm/.local/lib/python3.7/site-packages/angr/sim_procedure.py\", line 230, in execute\r\n r = getattr(inst, inst.run_func)(*sim_args, **inst.kwargs)\r\n File \"/home/berlinm/.local/lib/python3.7/site-packages/angr/procedures/linux_kernel/iovec.py\", line 21, in run\r\n if 
self.state.solver.is_true(self.state.solver.SLT(tmpres, 0)):\r\n File \"/home/berlinm/.local/lib/python3.7/site-packages/angr/state_plugins/solver.py\", line 126, in concrete_shortcut_bool\r\n return f(self, *args, **kwargs)\r\n File \"/home/berlinm/.local/lib/python3.7/site-packages/angr/state_plugins/sim_action_object.py\", line 57, in ast_stripper\r\n return f(*new_args, **new_kwargs)\r\n File \"/home/berlinm/.local/lib/python3.7/site-packages/angr/state_plugins/solver.py\", line 89, in wrapped_f\r\n return f(*args, **kwargs)\r\n File \"/home/berlinm/.local/lib/python3.7/site-packages/angr/state_plugins/solver.py\", line 585, in is_true\r\n return self._solver.is_true(e, extra_constraints=self._adjust_constraint_list(extra_constraints), exact=exact)\r\n File \"/home/berlinm/.local/lib/python3.7/site-packages/claripy/frontend_mixins/concrete_handler_mixin.py\", line 53, in is_true\r\n return super(ConcreteHandlerMixin, self).is_true(e, **kwargs)\r\n File \"/home/berlinm/.local/lib/python3.7/site-packages/claripy/frontend_mixins/constraint_filter_mixin.py\", line 60, in is_true\r\n return super(ConstraintFilterMixin, self).is_true(e, extra_constraints=ec, **kwargs)\r\n File \"/home/berlinm/.local/lib/python3.7/site-packages/claripy/frontends/composite_frontend.py\", line 349, in is_true\r\n r = ms.is_true(e, extra_constraints=extra_constraints, exact=exact)\r\n File \"/home/berlinm/.local/lib/python3.7/site-packages/claripy/frontends/full_frontend.py\", line 184, in is_true\r\n return e.is_true()\r\nAttributeError: 'NotImplementedType' object has no attribute 'is_true'\r\n```\r\n\r\n**Environment Information.**\r\nangr environment report\r\n=============================\r\nDate: 2020-12-30 12:39:53.872698\r\nRunning in virtual environment at /home/berlinm/.virtualenvs/angr\r\nPlatform: linux-x86_64\r\nPython version: 3.7.9 (7e6e2bb30ac5, Nov 18 2020, 10:55:52)\r\n[PyPy 7.3.3-beta0 with GCC 7.3.1 20180303 (Red Hat 7.3.1-5)]\r\n######## angr #########\r\nPython found it in /home/berlinm/.local/lib/python3.7/site-packages/angr\r\nPip version angr 9.0.5034\r\nCouldn't find git info\r\n######## ailment #########\r\nPython found it in /home/berlinm/.local/lib/python3.7/site-packages/ailment\r\nPip version ailment 9.0.5034\r\nCouldn't find git info\r\n######## cle #########\r\nPython found it in /home/berlinm/.local/lib/python3.7/site-packages/cle\r\nPip version cle 9.0.5034\r\nCouldn't find git info\r\n######## pyvex #########\r\nPython found it in /home/berlinm/.local/lib/python3.7/site-packages/pyvex\r\nPip version pyvex 9.0.5034\r\nCouldn't find git info\r\n######## claripy #########\r\nPython found it in /home/berlinm/.local/lib/python3.7/site-packages/claripy\r\nPip version claripy 9.0.5034\r\nCouldn't find git info\r\n######## archinfo #########\r\nPython found it in /home/berlinm/.local/lib/python3.7/site-packages/archinfo\r\nPip version archinfo 9.0.5034\r\nCouldn't find git info\r\n######## z3 #########\r\nPython found it in /home/berlinm/.local/lib/python3.7/site-packages/z3\r\nPip version z3-solver 4.8.9.0\r\nCouldn't find git info\r\n######## unicorn #########\r\nPython found it in /home/berlinm/.local/lib/python3.7/site-packages/unicorn\r\nPip version unicorn 1.0.2rc4\r\nCouldn't find git info\r\n######### Native Module Info ##########\r\nangr: <CDLL '/home/berlinm/.local/lib/python3.7/site-packages/angr/lib/angr_native.so', handle 61bfe90 at 0x638ebb8>\r\nunicorn: <CDLL '/home/berlinm/.local/lib/python3.7/site-packages/unicorn/lib/libunicorn.so', handle 2393fd0 at 
0x7fcacfae5da8>\r\npyvex: <cffi.api._make_ffi_library.<locals>.FFILibrary object at 0x00007fcad0b9b398>\r\nz3: <CDLL '/home/berlinm/.local/lib/python3.7/site-packages/z3/lib/libz3.so', handle 2fc8df0 at 0x3206758>\r\n\r\n\r\n**To Reproduce.**\r\n\r\nproject = angr.Project('ls')\r\n\r\nsm = project.factory.simulation_manager(project.factory.call_state(0x404a70)) # Some function's address\r\nsm.explore()\r\n\r\n\r\n# The binary (inside the zip):\r\n[ls.zip](https://github.com/angr/angr/files/5753958/ls.zip)\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import angr\nfrom ..posix.read import read\nfrom ..posix.write import write\nfrom ...sim_type import register_types, parse_types\n\nregister_types(parse_types(\"\"\"\nstruct iovec {\n void *iov_base; /* Starting address */\n size_t iov_len; /* Number of bytes to transfer */\n};\n\"\"\"))\n\nclass readv(angr.SimProcedure):\n def run(self, fd, iovec, iovcnt):\n if iovec.symbolic or iovcnt.symbolic:\n raise angr.errors.SimPosixError(\"Can't handle symbolic arguments to readv\")\n iovcnt = self.state.solver.eval(iovcnt)\n res = 0\n for element in self.state.mem[iovec].struct.iovec.array(iovcnt).resolved:\n tmpres = self.inline_call(read, fd, element.iov_base, element.iov_len)\n if self.state.solver.is_true(self.state.solver.SLT(tmpres, 0)):\n return tmpres\n\n return res\n\n\nclass writev(angr.SimProcedure):\n def run(self, fd, iovec, iovcnt):\n if iovec.symbolic or iovcnt.symbolic:\n raise angr.errors.SimPosixError(\"Can't handle symbolic arguments to writev\")\n iovcnt = self.state.solver.eval(iovcnt)\n res = 0\n for element in self.state.mem[iovec].struct.iovec.array(iovcnt).resolved:\n tmpres = self.inline_call(write, fd, element.iov_base, element.iov_len).ret_expr\n if self.state.solver.is_true(self.state.solver.SLT(tmpres, 0)):\n return tmpres\n\n return res\n", "path": "angr/procedures/linux_kernel/iovec.py"}]} | 3,094 | 176 |
gh_patches_debug_13431 | rasdani/github-patches | git_diff | comic__grand-challenge.org-1755 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CSV export fails on converting uuid to json
Problematic JSON structure in https://grand-challenge.org/api/v1/cases/images/redacted_uuid/?format=csv
```
{"pk":"redacted_uuid","name":"redacted.png","study":null,"files":[{"pk":"redacted_uuid","image":"redacted_uuid","file":"https://grand-challenge.org/media/images/...mhd","image_type":"MHD"},{"pk":"redacted_uuid","image":"09b3b3d6-0994-43d2-b6a9-eaff634b8805","file":"https://grand-challenge.org/media/images/...zraw","image_type":"MHD"}],"reader_study_set":["https://grand-challenge.org/api/v1/reader-studies/redacted_uuid/","https://grand-challenge.org/api/v1/reader-studies/redacted_uuid/"],"archive_set":[],"job_set":[],"width":596,"height":596,"depth":null,"color_space":"RGB","modality":null,"eye_choice":"NA","stereoscopic_choice":null,"field_of_view":null,"shape_without_color":[596,596],"shape":[596,596,3],"voxel_width_mm":null,"voxel_height_mm":null,"voxel_depth_mm":null,"api_url":"https://grand-challenge.org/api/v1/cases/images/redacted_uuid/"}
```
Probably due to trying to serialize the list of files to json in https://github.com/comic/grand-challenge.org/blob/14bc3dd4002756e9cf4a32bb0f238859a9175252/app/grandchallenge/core/renderers.py#L26-L27
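
A minimal sketch of the failure mode, using illustrative values rather than the actual API response (and assuming djangorestframework is installed, as it is in this project):

```python
import json
import uuid

files = [{"pk": uuid.uuid4(), "image_type": "MHD"}]

# The stock json module cannot serialize UUID instances:
try:
    json.dumps(files)
except TypeError as exc:
    print(exc)  # "... UUID is not JSON serializable"

# DRF ships an encoder that already knows about UUIDs, datetimes, Decimals, etc.:
from rest_framework.utils.encoders import JSONEncoder

print(json.dumps(files, cls=JSONEncoder))  # pk rendered as a plain string
```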
</issue>
<code>
[start of app/grandchallenge/core/renderers.py]
1 import json
2
3 from rest_framework_csv.renderers import CSVRenderer
4
5
6 class PaginatedCSVRenderer(CSVRenderer):
7 results_field = "results"
8
9 def render(self, data, *args, **kwargs):
10 if self.results_field in data:
11 data = data[self.results_field]
12
13 return super().render(data, *args, **kwargs)
14
15 def flatten_data(self, data):
16 """
17 Create a dictionary that is 1 level deep, with nested values serialized
18 as json. This means that the header rows are now consistent.
19 """
20 for row in data:
21 flat_row = {k: self._flatten_value(v) for k, v in row.items()}
22 yield flat_row
23
24 @staticmethod
25 def _flatten_value(value):
26 if isinstance(value, (dict, list)):
27 return json.dumps(value)
28 else:
29 return value
30
[end of app/grandchallenge/core/renderers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/grandchallenge/core/renderers.py b/app/grandchallenge/core/renderers.py
--- a/app/grandchallenge/core/renderers.py
+++ b/app/grandchallenge/core/renderers.py
@@ -1,5 +1,7 @@
import json
+from rest_framework.settings import api_settings
+from rest_framework.utils.encoders import JSONEncoder
from rest_framework_csv.renderers import CSVRenderer
@@ -24,6 +26,11 @@
@staticmethod
def _flatten_value(value):
if isinstance(value, (dict, list)):
- return json.dumps(value)
+ return json.dumps(
+ value,
+ cls=JSONEncoder,
+ ensure_ascii=not api_settings.UNICODE_JSON,
+ allow_nan=not api_settings.STRICT_JSON,
+ )
else:
return value
| {"golden_diff": "diff --git a/app/grandchallenge/core/renderers.py b/app/grandchallenge/core/renderers.py\n--- a/app/grandchallenge/core/renderers.py\n+++ b/app/grandchallenge/core/renderers.py\n@@ -1,5 +1,7 @@\n import json\n \n+from rest_framework.settings import api_settings\n+from rest_framework.utils.encoders import JSONEncoder\n from rest_framework_csv.renderers import CSVRenderer\n \n \n@@ -24,6 +26,11 @@\n @staticmethod\n def _flatten_value(value):\n if isinstance(value, (dict, list)):\n- return json.dumps(value)\n+ return json.dumps(\n+ value,\n+ cls=JSONEncoder,\n+ ensure_ascii=not api_settings.UNICODE_JSON,\n+ allow_nan=not api_settings.STRICT_JSON,\n+ )\n else:\n return value\n", "issue": "CSV export fails on converting uuid to json\nProblematic JSON structure in https://grand-challenge.org/api/v1/cases/images/redacted_uuid/?format=csv\r\n\r\n```\r\n{\"pk\":\"redacted_uuid\",\"name\":\"redacted.png\",\"study\":null,\"files\":[{\"pk\":\"redacted_uuid\",\"image\":\"redacted_uuid\",\"file\":\"https://grand-challenge.org/media/images/...mhd\",\"image_type\":\"MHD\"},{\"pk\":\"redacted_uuid\",\"image\":\"09b3b3d6-0994-43d2-b6a9-eaff634b8805\",\"file\":\"https://grand-challenge.org/media/images/...zraw\",\"image_type\":\"MHD\"}],\"reader_study_set\":[\"https://grand-challenge.org/api/v1/reader-studies/redacted_uuid/\",\"https://grand-challenge.org/api/v1/reader-studies/redacted_uuid/\"],\"archive_set\":[],\"job_set\":[],\"width\":596,\"height\":596,\"depth\":null,\"color_space\":\"RGB\",\"modality\":null,\"eye_choice\":\"NA\",\"stereoscopic_choice\":null,\"field_of_view\":null,\"shape_without_color\":[596,596],\"shape\":[596,596,3],\"voxel_width_mm\":null,\"voxel_height_mm\":null,\"voxel_depth_mm\":null,\"api_url\":\"https://grand-challenge.org/api/v1/cases/images/redacted_uuid/\"}\r\n```\r\n\r\nProbably due to trying to serialize the list of files to json in https://github.com/comic/grand-challenge.org/blob/14bc3dd4002756e9cf4a32bb0f238859a9175252/app/grandchallenge/core/renderers.py#L26-L27\n", "before_files": [{"content": "import json\n\nfrom rest_framework_csv.renderers import CSVRenderer\n\n\nclass PaginatedCSVRenderer(CSVRenderer):\n results_field = \"results\"\n\n def render(self, data, *args, **kwargs):\n if self.results_field in data:\n data = data[self.results_field]\n\n return super().render(data, *args, **kwargs)\n\n def flatten_data(self, data):\n \"\"\"\n Create a dictionary that is 1 level deep, with nested values serialized\n as json. This means that the header rows are now consistent.\n \"\"\"\n for row in data:\n flat_row = {k: self._flatten_value(v) for k, v in row.items()}\n yield flat_row\n\n @staticmethod\n def _flatten_value(value):\n if isinstance(value, (dict, list)):\n return json.dumps(value)\n else:\n return value\n", "path": "app/grandchallenge/core/renderers.py"}]} | 1,166 | 180 |
gh_patches_debug_494 | rasdani/github-patches | git_diff | OpenNMT__OpenNMT-py-1455 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Queue full error - Multi-GPU 1M custom dataset
```
Traceback (most recent call last):
File "/opt/conda/envs/learn-dev/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "/opt/conda/envs/learn-dev/lib/python3.7/multiprocessing/process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "/workspace/OpenNMT-py/train.py", line 127, in batch_producer
q.put(b, False)
File "/opt/conda/envs/learn-dev/lib/python3.7/multiprocessing/queues.py", line 83, in put
raise Full
queue.Full
[2019-06-04 07:32:18,291 INFO] Step 1200/100000; acc: 79.56; ppl: 1.82; xent: 0.60; lr: 1.00000; 33996/13919 tok/s; 401 sec
Traceback (most recent call last):
File "train.py", line 196, in <module>
main(opt)
File "train.py", line 78, in main
p.join()
File "/opt/conda/envs/learn-dev/lib/python3.7/multiprocessing/process.py", line 140, in join
res = self._popen.wait(timeout)
File "/opt/conda/envs/learn-dev/lib/python3.7/multiprocessing/popen_fork.py", line 48, in wait
return self.poll(os.WNOHANG if timeout == 0.0 else 0)
File "/opt/conda/envs/learn-dev/lib/python3.7/multiprocessing/popen_fork.py", line 28, in poll
pid, sts = os.waitpid(self.pid, flag)
File "train.py", line 180, in signal_handler
raise Exception(msg)
Exception:
-- Tracebacks above this line can probably
be ignored --
Traceback (most recent call last):
File "/workspace/OpenNMT-py/train.py", line 138, in run
single_main(opt, device_id, batch_queue, semaphore)
File "/workspace/OpenNMT-py/onmt/train_single.py", line 139, in main
valid_steps=opt.valid_steps)
File "/workspace/OpenNMT-py/onmt/trainer.py", line 224, in train
self._accum_batches(train_iter)):
File "/workspace/OpenNMT-py/onmt/trainer.py", line 162, in _accum_batches
for batch in iterator:
File "/workspace/OpenNMT-py/onmt/train_single.py", line 116, in _train_iter
batch = batch_queue.get()
File "/opt/conda/envs/learn-dev/lib/python3.7/multiprocessing/queues.py", line 113, in get
return _ForkingPickler.loads(res)
File "/opt/conda/envs/learn-dev/lib/python3.7/site-packages/torch/multiprocessing/reductions.py", line 109, in rebuild_cuda_tensor
event_sync_required)
RuntimeError: CUDA error: unknown error
```
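
A hedged aside on the first traceback (the `queue.Full` one): `multiprocessing` queues only raise `queue.Full` for non-blocking puts; a blocking put would simply wait for a consumer. The snippet below just demonstrates that standard-library behaviour and is not code from this repository:

```python
from multiprocessing import Queue
import queue

q = Queue(maxsize=1)
q.put("first batch")  # fills the single slot

try:
    q.put("second batch", False)  # non-blocking put on a full queue
except queue.Full:
    print("queue.Full raised, exactly as in the producer traceback above")

# A plain blocking q.put("second batch") would instead wait until another
# process calls q.get(), giving back-pressure rather than an exception.
```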
</issue>
<code>
[start of train.py]
1 #!/usr/bin/env python
2 """Train models."""
3 import os
4 import signal
5 import torch
6
7 import onmt.opts as opts
8 import onmt.utils.distributed
9
10 from onmt.utils.misc import set_random_seed
11 from onmt.utils.logging import init_logger, logger
12 from onmt.train_single import main as single_main
13 from onmt.utils.parse import ArgumentParser
14 from onmt.inputters.inputter import build_dataset_iter, \
15 load_old_vocab, old_style_vocab, build_dataset_iter_multiple
16
17 from itertools import cycle
18
19
20 def main(opt):
21 ArgumentParser.validate_train_opts(opt)
22 ArgumentParser.update_model_opts(opt)
23 ArgumentParser.validate_model_opts(opt)
24
25 # Load checkpoint if we resume from a previous training.
26 if opt.train_from:
27 logger.info('Loading checkpoint from %s' % opt.train_from)
28 checkpoint = torch.load(opt.train_from,
29 map_location=lambda storage, loc: storage)
30 logger.info('Loading vocab from checkpoint at %s.' % opt.train_from)
31 vocab = checkpoint['vocab']
32 else:
33 vocab = torch.load(opt.data + '.vocab.pt')
34
35 # check for code where vocab is saved instead of fields
36 # (in the future this will be done in a smarter way)
37 if old_style_vocab(vocab):
38 fields = load_old_vocab(
39 vocab, opt.model_type, dynamic_dict=opt.copy_attn)
40 else:
41 fields = vocab
42
43 if len(opt.data_ids) > 1:
44 train_shards = []
45 for train_id in opt.data_ids:
46 shard_base = "train_" + train_id
47 train_shards.append(shard_base)
48 train_iter = build_dataset_iter_multiple(train_shards, fields, opt)
49 else:
50 train_iter = build_dataset_iter("train", fields, opt)
51
52 nb_gpu = len(opt.gpu_ranks)
53
54 if opt.world_size > 1:
55 queues = []
56 mp = torch.multiprocessing.get_context('spawn')
57 semaphore = mp.Semaphore(opt.world_size * opt.queue_size)
58 # Create a thread to listen for errors in the child processes.
59 error_queue = mp.SimpleQueue()
60 error_handler = ErrorHandler(error_queue)
61 # Train with multiprocessing.
62 procs = []
63 for device_id in range(nb_gpu):
64 q = mp.Queue(opt.queue_size)
65 queues += [q]
66 procs.append(mp.Process(target=run, args=(
67 opt, device_id, error_queue, q, semaphore), daemon=True))
68 procs[device_id].start()
69 logger.info(" Starting process pid: %d " % procs[device_id].pid)
70 error_handler.add_child(procs[device_id].pid)
71 producer = mp.Process(target=batch_producer,
72 args=(train_iter, queues, semaphore, opt,),
73 daemon=True)
74 producer.start()
75 error_handler.add_child(producer.pid)
76
77 for p in procs:
78 p.join()
79 producer.terminate()
80
81 elif nb_gpu == 1: # case 1 GPU only
82 single_main(opt, 0)
83 else: # case only CPU
84 single_main(opt, -1)
85
86
87 def batch_producer(generator_to_serve, queues, semaphore, opt):
88 init_logger(opt.log_file)
89 set_random_seed(opt.seed, False)
90 # generator_to_serve = iter(generator_to_serve)
91
92 def pred(x):
93 """
94 Filters batches that belong only
95 to gpu_ranks of current node
96 """
97 for rank in opt.gpu_ranks:
98 if x[0] % opt.world_size == rank:
99 return True
100
101 generator_to_serve = filter(
102 pred, enumerate(generator_to_serve))
103
104 def next_batch(device_id):
105 new_batch = next(generator_to_serve)
106 semaphore.acquire()
107 return new_batch[1]
108
109 b = next_batch(0)
110
111 for device_id, q in cycle(enumerate(queues)):
112 b.dataset = None
113 if isinstance(b.src, tuple):
114 b.src = tuple([_.to(torch.device(device_id))
115 for _ in b.src])
116 else:
117 b.src = b.src.to(torch.device(device_id))
118 b.tgt = b.tgt.to(torch.device(device_id))
119 b.indices = b.indices.to(torch.device(device_id))
120 b.alignment = b.alignment.to(torch.device(device_id)) \
121 if hasattr(b, 'alignment') else None
122 b.src_map = b.src_map.to(torch.device(device_id)) \
123 if hasattr(b, 'src_map') else None
124
125 # hack to dodge unpicklable `dict_keys`
126 b.fields = list(b.fields)
127 q.put(b, False)
128 b = next_batch(device_id)
129
130
131 def run(opt, device_id, error_queue, batch_queue, semaphore):
132 """ run process """
133 try:
134 gpu_rank = onmt.utils.distributed.multi_init(opt, device_id)
135 if gpu_rank != opt.gpu_ranks[device_id]:
136 raise AssertionError("An error occurred in \
137 Distributed initialization")
138 single_main(opt, device_id, batch_queue, semaphore)
139 except KeyboardInterrupt:
140 pass # killed by parent, do nothing
141 except Exception:
142 # propagate exception to parent process, keeping original traceback
143 import traceback
144 error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc()))
145
146
147 class ErrorHandler(object):
148 """A class that listens for exceptions in children processes and propagates
149 the tracebacks to the parent process."""
150
151 def __init__(self, error_queue):
152 """ init error handler """
153 import signal
154 import threading
155 self.error_queue = error_queue
156 self.children_pids = []
157 self.error_thread = threading.Thread(
158 target=self.error_listener, daemon=True)
159 self.error_thread.start()
160 signal.signal(signal.SIGUSR1, self.signal_handler)
161
162 def add_child(self, pid):
163 """ error handler """
164 self.children_pids.append(pid)
165
166 def error_listener(self):
167 """ error listener """
168 (rank, original_trace) = self.error_queue.get()
169 self.error_queue.put((rank, original_trace))
170 os.kill(os.getpid(), signal.SIGUSR1)
171
172 def signal_handler(self, signalnum, stackframe):
173 """ signal handler """
174 for pid in self.children_pids:
175 os.kill(pid, signal.SIGINT) # kill children processes
176 (rank, original_trace) = self.error_queue.get()
177 msg = """\n\n-- Tracebacks above this line can probably
178 be ignored --\n\n"""
179 msg += original_trace
180 raise Exception(msg)
181
182
183 def _get_parser():
184 parser = ArgumentParser(description='train.py')
185
186 opts.config_opts(parser)
187 opts.model_opts(parser)
188 opts.train_opts(parser)
189 return parser
190
191
192 if __name__ == "__main__":
193 parser = _get_parser()
194
195 opt = parser.parse_args()
196 main(opt)
197
[end of train.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/train.py b/train.py
--- a/train.py
+++ b/train.py
@@ -124,7 +124,7 @@
# hack to dodge unpicklable `dict_keys`
b.fields = list(b.fields)
- q.put(b, False)
+ q.put(b)
b = next_batch(device_id)
| {"golden_diff": "diff --git a/train.py b/train.py\n--- a/train.py\n+++ b/train.py\n@@ -124,7 +124,7 @@\n \n # hack to dodge unpicklable `dict_keys`\n b.fields = list(b.fields)\n- q.put(b, False)\n+ q.put(b)\n b = next_batch(device_id)\n", "issue": "Queue full error - Multi-GPU 1M custom dataset\n```\r\nTraceback (most recent call last):\r\n File \"/opt/conda/envs/learn-dev/lib/python3.7/multiprocessing/process.py\", line 297, in _bootstrap\r\n self.run()\r\n File \"/opt/conda/envs/learn-dev/lib/python3.7/multiprocessing/process.py\", line 99, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/workspace/OpenNMT-py/train.py\", line 127, in batch_producer\r\n q.put(b, False)\r\n File \"/opt/conda/envs/learn-dev/lib/python3.7/multiprocessing/queues.py\", line 83, in put\r\n raise Full\r\nqueue.Full\r\n[2019-06-04 07:32:18,291 INFO] Step 1200/100000; acc: 79.56; ppl: 1.82; xent: 0.60; lr: 1.00000; 33996/13919 tok/s; 401 sec\r\nTraceback (most recent call last):\r\n File \"train.py\", line 196, in <module>\r\n main(opt)\r\n File \"train.py\", line 78, in main\r\n p.join()\r\n File \"/opt/conda/envs/learn-dev/lib/python3.7/multiprocessing/process.py\", line 140, in join\r\n res = self._popen.wait(timeout)\r\n File \"/opt/conda/envs/learn-dev/lib/python3.7/multiprocessing/popen_fork.py\", line 48, in wait\r\n return self.poll(os.WNOHANG if timeout == 0.0 else 0)\r\n File \"/opt/conda/envs/learn-dev/lib/python3.7/multiprocessing/popen_fork.py\", line 28, in poll\r\n pid, sts = os.waitpid(self.pid, flag)\r\n File \"train.py\", line 180, in signal_handler\r\n raise Exception(msg)\r\nException: \r\n\r\n-- Tracebacks above this line can probably\r\n be ignored --\r\n\r\nTraceback (most recent call last):\r\n File \"/workspace/OpenNMT-py/train.py\", line 138, in run\r\n single_main(opt, device_id, batch_queue, semaphore)\r\n File \"/workspace/OpenNMT-py/onmt/train_single.py\", line 139, in main\r\n valid_steps=opt.valid_steps)\r\n File \"/workspace/OpenNMT-py/onmt/trainer.py\", line 224, in train\r\n self._accum_batches(train_iter)):\r\n File \"/workspace/OpenNMT-py/onmt/trainer.py\", line 162, in _accum_batches\r\n for batch in iterator:\r\n File \"/workspace/OpenNMT-py/onmt/train_single.py\", line 116, in _train_iter\r\n batch = batch_queue.get()\r\n File \"/opt/conda/envs/learn-dev/lib/python3.7/multiprocessing/queues.py\", line 113, in get\r\n return _ForkingPickler.loads(res)\r\n File \"/opt/conda/envs/learn-dev/lib/python3.7/site-packages/torch/multiprocessing/reductions.py\", line 109, in rebuild_cuda_tensor\r\n event_sync_required)\r\nRuntimeError: CUDA error: unknown error\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"Train models.\"\"\"\nimport os\nimport signal\nimport torch\n\nimport onmt.opts as opts\nimport onmt.utils.distributed\n\nfrom onmt.utils.misc import set_random_seed\nfrom onmt.utils.logging import init_logger, logger\nfrom onmt.train_single import main as single_main\nfrom onmt.utils.parse import ArgumentParser\nfrom onmt.inputters.inputter import build_dataset_iter, \\\n load_old_vocab, old_style_vocab, build_dataset_iter_multiple\n\nfrom itertools import cycle\n\n\ndef main(opt):\n ArgumentParser.validate_train_opts(opt)\n ArgumentParser.update_model_opts(opt)\n ArgumentParser.validate_model_opts(opt)\n\n # Load checkpoint if we resume from a previous training.\n if opt.train_from:\n logger.info('Loading checkpoint from %s' % opt.train_from)\n checkpoint = torch.load(opt.train_from,\n map_location=lambda storage, loc: storage)\n 
logger.info('Loading vocab from checkpoint at %s.' % opt.train_from)\n vocab = checkpoint['vocab']\n else:\n vocab = torch.load(opt.data + '.vocab.pt')\n\n # check for code where vocab is saved instead of fields\n # (in the future this will be done in a smarter way)\n if old_style_vocab(vocab):\n fields = load_old_vocab(\n vocab, opt.model_type, dynamic_dict=opt.copy_attn)\n else:\n fields = vocab\n\n if len(opt.data_ids) > 1:\n train_shards = []\n for train_id in opt.data_ids:\n shard_base = \"train_\" + train_id\n train_shards.append(shard_base)\n train_iter = build_dataset_iter_multiple(train_shards, fields, opt)\n else:\n train_iter = build_dataset_iter(\"train\", fields, opt)\n\n nb_gpu = len(opt.gpu_ranks)\n\n if opt.world_size > 1:\n queues = []\n mp = torch.multiprocessing.get_context('spawn')\n semaphore = mp.Semaphore(opt.world_size * opt.queue_size)\n # Create a thread to listen for errors in the child processes.\n error_queue = mp.SimpleQueue()\n error_handler = ErrorHandler(error_queue)\n # Train with multiprocessing.\n procs = []\n for device_id in range(nb_gpu):\n q = mp.Queue(opt.queue_size)\n queues += [q]\n procs.append(mp.Process(target=run, args=(\n opt, device_id, error_queue, q, semaphore), daemon=True))\n procs[device_id].start()\n logger.info(\" Starting process pid: %d \" % procs[device_id].pid)\n error_handler.add_child(procs[device_id].pid)\n producer = mp.Process(target=batch_producer,\n args=(train_iter, queues, semaphore, opt,),\n daemon=True)\n producer.start()\n error_handler.add_child(producer.pid)\n\n for p in procs:\n p.join()\n producer.terminate()\n\n elif nb_gpu == 1: # case 1 GPU only\n single_main(opt, 0)\n else: # case only CPU\n single_main(opt, -1)\n\n\ndef batch_producer(generator_to_serve, queues, semaphore, opt):\n init_logger(opt.log_file)\n set_random_seed(opt.seed, False)\n # generator_to_serve = iter(generator_to_serve)\n\n def pred(x):\n \"\"\"\n Filters batches that belong only\n to gpu_ranks of current node\n \"\"\"\n for rank in opt.gpu_ranks:\n if x[0] % opt.world_size == rank:\n return True\n\n generator_to_serve = filter(\n pred, enumerate(generator_to_serve))\n\n def next_batch(device_id):\n new_batch = next(generator_to_serve)\n semaphore.acquire()\n return new_batch[1]\n\n b = next_batch(0)\n\n for device_id, q in cycle(enumerate(queues)):\n b.dataset = None\n if isinstance(b.src, tuple):\n b.src = tuple([_.to(torch.device(device_id))\n for _ in b.src])\n else:\n b.src = b.src.to(torch.device(device_id))\n b.tgt = b.tgt.to(torch.device(device_id))\n b.indices = b.indices.to(torch.device(device_id))\n b.alignment = b.alignment.to(torch.device(device_id)) \\\n if hasattr(b, 'alignment') else None\n b.src_map = b.src_map.to(torch.device(device_id)) \\\n if hasattr(b, 'src_map') else None\n\n # hack to dodge unpicklable `dict_keys`\n b.fields = list(b.fields)\n q.put(b, False)\n b = next_batch(device_id)\n\n\ndef run(opt, device_id, error_queue, batch_queue, semaphore):\n \"\"\" run process \"\"\"\n try:\n gpu_rank = onmt.utils.distributed.multi_init(opt, device_id)\n if gpu_rank != opt.gpu_ranks[device_id]:\n raise AssertionError(\"An error occurred in \\\n Distributed initialization\")\n single_main(opt, device_id, batch_queue, semaphore)\n except KeyboardInterrupt:\n pass # killed by parent, do nothing\n except Exception:\n # propagate exception to parent process, keeping original traceback\n import traceback\n error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc()))\n\n\nclass ErrorHandler(object):\n \"\"\"A class that 
listens for exceptions in children processes and propagates\n the tracebacks to the parent process.\"\"\"\n\n def __init__(self, error_queue):\n \"\"\" init error handler \"\"\"\n import signal\n import threading\n self.error_queue = error_queue\n self.children_pids = []\n self.error_thread = threading.Thread(\n target=self.error_listener, daemon=True)\n self.error_thread.start()\n signal.signal(signal.SIGUSR1, self.signal_handler)\n\n def add_child(self, pid):\n \"\"\" error handler \"\"\"\n self.children_pids.append(pid)\n\n def error_listener(self):\n \"\"\" error listener \"\"\"\n (rank, original_trace) = self.error_queue.get()\n self.error_queue.put((rank, original_trace))\n os.kill(os.getpid(), signal.SIGUSR1)\n\n def signal_handler(self, signalnum, stackframe):\n \"\"\" signal handler \"\"\"\n for pid in self.children_pids:\n os.kill(pid, signal.SIGINT) # kill children processes\n (rank, original_trace) = self.error_queue.get()\n msg = \"\"\"\\n\\n-- Tracebacks above this line can probably\n be ignored --\\n\\n\"\"\"\n msg += original_trace\n raise Exception(msg)\n\n\ndef _get_parser():\n parser = ArgumentParser(description='train.py')\n\n opts.config_opts(parser)\n opts.model_opts(parser)\n opts.train_opts(parser)\n return parser\n\n\nif __name__ == \"__main__\":\n parser = _get_parser()\n\n opt = parser.parse_args()\n main(opt)\n", "path": "train.py"}]} | 3,238 | 78 |
gh_patches_debug_27459 | rasdani/github-patches | git_diff | NVIDIA__apex-590 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SyncBatchNorm doesn't support 2 dimensions input?
Hi,
I'm facing an issue where the program crashes when the input to SyncBatchNorm has two dimensions. Here's the code:
```python
import torch
import apex
model = apex.parallel.SyncBatchNorm(4).cuda()
data = torch.rand((8,4)).cuda()
output = model(data)
```
When running the code, error raised like this:
```
Traceback (most recent call last):
File "syncbn_test.by", line 7, in <module>
output = model(data)
File "/usr/local/lib/python3.5/dist-packages/torch/nn/modules/module.py", line 489, in __call__
result = self.forward(*input, **kwargs)
File "/usr/local/lib/python3.5/dist-packages/apex/parallel/optimized_sync_batchnorm.py", line 81, in forward
return SyncBatchnormFunction.apply(input, self.weight, self.bias, self.running_mean, self.running_var, self.eps, self.training or not self.track_running_stats, exponential_average_factor, self.process_group, self.channel_last)
File "/usr/local/lib/python3.5/dist-packages/apex/parallel/optimized_sync_batchnorm_kernel.py", line 27, in forward
mean, var_biased = syncbn.welford_mean_var(input)
RuntimeError: Dimension out of range (expected to be in range of [-2, 1], but got 2) (maybe_wrap_dim at /pytorch/aten/src/ATen/core/WrapDimMinimal.h:18)
```
And everything runs fine when `data` is a 4-dimensional tensor.
Here is my environment:
```
Ubuntu 16.04
Python 3.5.2
Pytorch 1.0.1, installed with "pip install torch"
apex is installed with command:
pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" .
cuda 10.0
nvidia driver 410.72
```
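
Until the 2-D path is fixed, a possible user-side workaround (only a sketch, based on the observation above that 4-D input works, and assuming plain per-channel statistics over the batch are what you want):

```python
import torch
import apex

model = apex.parallel.SyncBatchNorm(4).cuda()
data = torch.rand((8, 4)).cuda()

# BatchNorm over an (N, C, 1, 1) tensor computes the same per-channel
# statistics as it would over (N, C), so reshape around the call:
output = model(data.view(8, 4, 1, 1)).view(8, 4)
```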
</issue>
<code>
[start of apex/parallel/optimized_sync_batchnorm.py]
1 import torch
2 from torch.nn.modules.batchnorm import _BatchNorm
3 from torch.nn import functional as F
4
5 import syncbn
6 from .optimized_sync_batchnorm_kernel import SyncBatchnormFunction
7
8
9 class SyncBatchNorm(_BatchNorm):
10 """
11 synchronized batch normalization module extented from `torch.nn.BatchNormNd`
12 with the added stats reduction across multiple processes.
13 :class:`apex.parallel.SyncBatchNorm` is designed to work with
14 `DistributedDataParallel`.
15
16 When running in training mode, the layer reduces stats across all processes
17 to increase the effective batchsize for normalization layer. This is useful
18 in applications where batch size is small on a given process that would
19 diminish converged accuracy of the model. The model uses collective
20 communication package from `torch.distributed`.
21
22 When running in evaluation mode, the layer falls back to
23 `torch.nn.functional.batch_norm`
24
25 Args:
26 num_features: :math:`C` from an expected input of size
27 :math:`(N, C, L)` or :math:`L` from input of size :math:`(N, L)`
28 eps: a value added to the denominator for numerical stability.
29 Default: 1e-5
30 momentum: the value used for the running_mean and running_var
31 computation. Can be set to ``None`` for cumulative moving average
32 (i.e. simple average). Default: 0.1
33 affine: a boolean value that when set to ``True``, this module has
34 learnable affine parameters. Default: ``True``
35 track_running_stats: a boolean value that when set to ``True``, this
36 module tracks the running mean and variance, and when set to ``False``,
37 this module does not track such statistics and always uses batch
38 statistics in both training and eval modes. Default: ``True``
39 process_group: pass in a process group within which the stats of the
40 mini-batch is being synchronized. ``None`` for using default process
41 group
42 channel_last: a boolean value that when set to ``True``, this module
43 take the last dimension of the input tensor to be the channel
44 dimension. Default: False
45
46 Examples::
47 >>> # channel first tensor
48 >>> sbn = apex.parallel.SyncBatchNorm(100).cuda()
49 >>> inp = torch.randn(10, 100, 14, 14).cuda()
50 >>> out = sbn(inp)
51 >>> inp = torch.randn(3, 100, 20).cuda()
52 >>> out = sbn(inp)
53 >>> # channel last tensor
54 >>> sbn = apex.parallel.SyncBatchNorm(100, channel_last=True).cuda()
55 >>> inp = torch.randn(10, 14, 14, 100).cuda()
56 """
57
58 def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, process_group=None, channel_last=False, fuse_relu=False):
59 super(SyncBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats)
60 self.process_group = process_group
61 self.channel_last = channel_last
62 self.fuse_relu = fuse_relu
63
64 def _specify_process_group(self, process_group):
65 self.process_group = process_group
66
67 def _specify_channel_last(self, channel_last):
68 self.channel_last = channel_last
69
70 def forward(self, input, z = None):
71 # if input.dim() == 2, we switch to channel_last for efficient memory accessing
72 channel_last = self.channel_last if input.dim() != 2 else True
73
74 if not self.training and self.track_running_stats and not self.channel_last and not self.fuse_relu and z == None:
75 # fall back to pytorch implementation for inference
76 return F.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, False, 0.0, self.eps)
77 else:
78 exponential_average_factor = 0.0
79 if self.training and self.track_running_stats:
80 self.num_batches_tracked += 1
81 if self.momentum is None:
82 exponential_average_factor = 1.0 / float(self.num_batches_tracked)
83 else:
84 exponential_average_factor = self.momentum
85 return SyncBatchnormFunction.apply(input, z, self.weight, self.bias, self.running_mean, self.running_var, self.eps, self.training or not self.track_running_stats, exponential_average_factor, self.process_group, self.channel_last, self.fuse_relu)
86
[end of apex/parallel/optimized_sync_batchnorm.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apex/parallel/optimized_sync_batchnorm.py b/apex/parallel/optimized_sync_batchnorm.py
--- a/apex/parallel/optimized_sync_batchnorm.py
+++ b/apex/parallel/optimized_sync_batchnorm.py
@@ -71,7 +71,7 @@
# if input.dim() == 2, we switch to channel_last for efficient memory accessing
channel_last = self.channel_last if input.dim() != 2 else True
- if not self.training and self.track_running_stats and not self.channel_last and not self.fuse_relu and z == None:
+ if not self.training and self.track_running_stats and not channel_last and not self.fuse_relu and z == None:
# fall back to pytorch implementation for inference
return F.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, False, 0.0, self.eps)
else:
@@ -82,4 +82,4 @@
exponential_average_factor = 1.0 / float(self.num_batches_tracked)
else:
exponential_average_factor = self.momentum
- return SyncBatchnormFunction.apply(input, z, self.weight, self.bias, self.running_mean, self.running_var, self.eps, self.training or not self.track_running_stats, exponential_average_factor, self.process_group, self.channel_last, self.fuse_relu)
+ return SyncBatchnormFunction.apply(input, z, self.weight, self.bias, self.running_mean, self.running_var, self.eps, self.training or not self.track_running_stats, exponential_average_factor, self.process_group, channel_last, self.fuse_relu)
| {"golden_diff": "diff --git a/apex/parallel/optimized_sync_batchnorm.py b/apex/parallel/optimized_sync_batchnorm.py\n--- a/apex/parallel/optimized_sync_batchnorm.py\n+++ b/apex/parallel/optimized_sync_batchnorm.py\n@@ -71,7 +71,7 @@\n # if input.dim() == 2, we switch to channel_last for efficient memory accessing\n channel_last = self.channel_last if input.dim() != 2 else True\n \n- if not self.training and self.track_running_stats and not self.channel_last and not self.fuse_relu and z == None:\n+ if not self.training and self.track_running_stats and not channel_last and not self.fuse_relu and z == None:\n # fall back to pytorch implementation for inference\n return F.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, False, 0.0, self.eps)\n else:\n@@ -82,4 +82,4 @@\n exponential_average_factor = 1.0 / float(self.num_batches_tracked)\n else:\n exponential_average_factor = self.momentum\n- return SyncBatchnormFunction.apply(input, z, self.weight, self.bias, self.running_mean, self.running_var, self.eps, self.training or not self.track_running_stats, exponential_average_factor, self.process_group, self.channel_last, self.fuse_relu)\n+ return SyncBatchnormFunction.apply(input, z, self.weight, self.bias, self.running_mean, self.running_var, self.eps, self.training or not self.track_running_stats, exponential_average_factor, self.process_group, channel_last, self.fuse_relu)\n", "issue": "SyncBatchNorm doesn't support 2 dimensions input?\nHi,\r\nI'm facing the issue that the program crash when the input for SyncBatchNorm is two dimensions. Here's the code:\r\n```python\r\nimport torch\r\nimport apex\r\n\r\nmodel = apex.parallel.SyncBatchNorm(4).cuda()\r\ndata = torch.rand((8,4)).cuda()\r\noutput = model(data)\r\n```\r\nWhen running the code, error raised like this:\r\n```\r\nTraceback (most recent call last):\r\n File \"syncbn_test.by\", line 7, in <module>\r\n output = model(data)\r\n File \"/usr/local/lib/python3.5/dist-packages/torch/nn/modules/module.py\", line 489, in __call__\r\n result = self.forward(*input, **kwargs)\r\n File \"/usr/local/lib/python3.5/dist-packages/apex/parallel/optimized_sync_batchnorm.py\", line 81, in forward\r\n return SyncBatchnormFunction.apply(input, self.weight, self.bias, self.running_mean, self.running_var, self.eps, self.training or not self.track_running_stats, exponential_average_factor, self.process_group, self.channel_last)\r\n File \"/usr/local/lib/python3.5/dist-packages/apex/parallel/optimized_sync_batchnorm_kernel.py\", line 27, in forward\r\n mean, var_biased = syncbn.welford_mean_var(input)\r\nRuntimeError: Dimension out of range (expected to be in range of [-2, 1], but got 2) (maybe_wrap_dim at /pytorch/aten/src/ATen/core/WrapDimMinimal.h:18)\r\n```\r\nAnd everthing runs ok when `data` a 4 dims tensor. 
\r\n\r\nHere is my environment:\r\n```\r\nUbuntu 16.04\r\nPython 3.5.2\r\nPytorch 1.0.1, installed with \"pip install torch\"\r\napex is installed with command:\r\n pip install -v --no-cache-dir --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext\" .\r\ncuda 10.0\r\nnvidia driver 410.72\r\n```\n", "before_files": [{"content": "import torch\nfrom torch.nn.modules.batchnorm import _BatchNorm\nfrom torch.nn import functional as F\n\nimport syncbn\nfrom .optimized_sync_batchnorm_kernel import SyncBatchnormFunction\n\n\nclass SyncBatchNorm(_BatchNorm):\n \"\"\"\n synchronized batch normalization module extented from `torch.nn.BatchNormNd`\n with the added stats reduction across multiple processes.\n :class:`apex.parallel.SyncBatchNorm` is designed to work with\n `DistributedDataParallel`.\n\n When running in training mode, the layer reduces stats across all processes\n to increase the effective batchsize for normalization layer. This is useful\n in applications where batch size is small on a given process that would\n diminish converged accuracy of the model. The model uses collective\n communication package from `torch.distributed`.\n\n When running in evaluation mode, the layer falls back to\n `torch.nn.functional.batch_norm`\n\n Args:\n num_features: :math:`C` from an expected input of size\n :math:`(N, C, L)` or :math:`L` from input of size :math:`(N, L)`\n eps: a value added to the denominator for numerical stability.\n Default: 1e-5\n momentum: the value used for the running_mean and running_var\n computation. Can be set to ``None`` for cumulative moving average\n (i.e. simple average). Default: 0.1\n affine: a boolean value that when set to ``True``, this module has\n learnable affine parameters. Default: ``True``\n track_running_stats: a boolean value that when set to ``True``, this\n module tracks the running mean and variance, and when set to ``False``,\n this module does not track such statistics and always uses batch\n statistics in both training and eval modes. Default: ``True``\n process_group: pass in a process group within which the stats of the\n mini-batch is being synchronized. ``None`` for using default process\n group\n channel_last: a boolean value that when set to ``True``, this module\n take the last dimension of the input tensor to be the channel\n dimension. 
Default: False\n\n Examples::\n >>> # channel first tensor\n >>> sbn = apex.parallel.SyncBatchNorm(100).cuda()\n >>> inp = torch.randn(10, 100, 14, 14).cuda()\n >>> out = sbn(inp)\n >>> inp = torch.randn(3, 100, 20).cuda()\n >>> out = sbn(inp)\n >>> # channel last tensor\n >>> sbn = apex.parallel.SyncBatchNorm(100, channel_last=True).cuda()\n >>> inp = torch.randn(10, 14, 14, 100).cuda()\n \"\"\"\n\n def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, process_group=None, channel_last=False, fuse_relu=False):\n super(SyncBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats)\n self.process_group = process_group\n self.channel_last = channel_last\n self.fuse_relu = fuse_relu\n\n def _specify_process_group(self, process_group):\n self.process_group = process_group\n\n def _specify_channel_last(self, channel_last):\n self.channel_last = channel_last\n\n def forward(self, input, z = None):\n # if input.dim() == 2, we switch to channel_last for efficient memory accessing\n channel_last = self.channel_last if input.dim() != 2 else True\n\n if not self.training and self.track_running_stats and not self.channel_last and not self.fuse_relu and z == None:\n # fall back to pytorch implementation for inference\n return F.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, False, 0.0, self.eps)\n else:\n exponential_average_factor = 0.0\n if self.training and self.track_running_stats:\n self.num_batches_tracked += 1\n if self.momentum is None:\n exponential_average_factor = 1.0 / float(self.num_batches_tracked)\n else:\n exponential_average_factor = self.momentum\n return SyncBatchnormFunction.apply(input, z, self.weight, self.bias, self.running_mean, self.running_var, self.eps, self.training or not self.track_running_stats, exponential_average_factor, self.process_group, self.channel_last, self.fuse_relu)\n", "path": "apex/parallel/optimized_sync_batchnorm.py"}]} | 2,148 | 349 |
gh_patches_debug_23483 | rasdani/github-patches | git_diff | getredash__redash-3088 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Access current user id/details from Python data source
First of all, thanks to the authors and contributors for a very interesting and promising tool.
### Question:
Is it possible to access the current user's id/details from a Python data source?
I would like to control access to the viewed data at row level without having to create and maintain multiple (in my case about 80) data sources and dashboards. My desired scenario:
1. Store A logs into their account and accesses dashboard with KPI/Metrics.
2. Dashboard calls Python data source(s).
3. Python code loads data from a db table with all stores, identifies current user and filters out all records where store != Store A.
4. Dashboard is dynamically loaded with entries for Store A only.
Also if there are any other ways to achieve the above, please advise.
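
For illustration, the row-level filter itself is simple once the viewer's identity is known; the open question is how to obtain it, since (as the runner code below shows, see the `TODO: pass the user here` comment) the user is not handed to the script. In the sketch below, `current_user_email` and the `stores_db` data source name are placeholder assumptions, not existing Redash variables:

```python
# Hypothetical sketch for a Python data source script.
current_user_email = 'store-a@example.com'  # assumption: injected by some mechanism

kpis = execute_query('stores_db', 'SELECT store_email, revenue FROM kpis')

result = {}
add_result_column(result, 'store_email', 'Store', 'string')
add_result_column(result, 'revenue', 'Revenue', 'float')

for row in kpis['rows']:
    if row['store_email'] == current_user_email:
        add_result_row(result, row)
```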
### Technical details:
* Redash Version: 0.12.0
</issue>
<code>
[start of redash/query_runner/python.py]
1 import datetime
2 import importlib
3 import logging
4 import sys
5
6 from redash.query_runner import *
7 from redash.utils import json_dumps, json_loads
8 from redash import models
9 from RestrictedPython import compile_restricted
10 from RestrictedPython.Guards import safe_builtins
11
12
13 logger = logging.getLogger(__name__)
14
15
16 class CustomPrint(object):
17 """CustomPrint redirect "print" calls to be sent as "log" on the result object."""
18 def __init__(self):
19 self.enabled = True
20 self.lines = []
21
22 def write(self, text):
23 if self.enabled:
24 if text and text.strip():
25 log_line = "[{0}] {1}".format(datetime.datetime.utcnow().isoformat(), text)
26 self.lines.append(log_line)
27
28 def enable(self):
29 self.enabled = True
30
31 def disable(self):
32 self.enabled = False
33
34 def __call__(self):
35 return self
36
37
38 class Python(BaseQueryRunner):
39 safe_builtins = (
40 'sorted', 'reversed', 'map', 'reduce', 'any', 'all',
41 'slice', 'filter', 'len', 'next', 'enumerate',
42 'sum', 'abs', 'min', 'max', 'round', 'cmp', 'divmod',
43 'str', 'unicode', 'int', 'float', 'complex',
44 'tuple', 'set', 'list', 'dict', 'bool',
45 )
46
47 @classmethod
48 def configuration_schema(cls):
49 return {
50 'type': 'object',
51 'properties': {
52 'allowedImportModules': {
53 'type': 'string',
54 'title': 'Modules to import prior to running the script'
55 },
56 'additionalModulesPaths': {
57 'type': 'string'
58 }
59 },
60 }
61
62 @classmethod
63 def enabled(cls):
64 return True
65
66 @classmethod
67 def annotate_query(cls):
68 return False
69
70 def __init__(self, configuration):
71 super(Python, self).__init__(configuration)
72
73 self.syntax = "python"
74
75 self._allowed_modules = {}
76 self._script_locals = {"result": {"rows": [], "columns": [], "log": []}}
77 self._enable_print_log = True
78 self._custom_print = CustomPrint()
79
80 if self.configuration.get("allowedImportModules", None):
81 for item in self.configuration["allowedImportModules"].split(","):
82 self._allowed_modules[item] = None
83
84 if self.configuration.get("additionalModulesPaths", None):
85 for p in self.configuration["additionalModulesPaths"].split(","):
86 if p not in sys.path:
87 sys.path.append(p)
88
89 def custom_import(self, name, globals=None, locals=None, fromlist=(), level=0):
90 if name in self._allowed_modules:
91 m = None
92 if self._allowed_modules[name] is None:
93 m = importlib.import_module(name)
94 self._allowed_modules[name] = m
95 else:
96 m = self._allowed_modules[name]
97
98 return m
99
100 raise Exception("'{0}' is not configured as a supported import module".format(name))
101
102 @staticmethod
103 def custom_write(obj):
104 """
105 Custom hooks which controls the way objects/lists/tuples/dicts behave in
106 RestrictedPython
107 """
108 return obj
109
110 @staticmethod
111 def custom_get_item(obj, key):
112 return obj[key]
113
114 @staticmethod
115 def custom_get_iter(obj):
116 return iter(obj)
117
118 @staticmethod
119 def add_result_column(result, column_name, friendly_name, column_type):
120 """Helper function to add columns inside a Python script running in Redash in an easier way
121
122 Parameters:
123 :result dict: The result dict
124 :column_name string: Name of the column, which should be consisted of lowercase latin letters or underscore.
125 :friendly_name string: Name of the column for display
126 :column_type string: Type of the column. Check supported data types for details.
127 """
128 if column_type not in SUPPORTED_COLUMN_TYPES:
129 raise Exception("'{0}' is not a supported column type".format(column_type))
130
131 if "columns" not in result:
132 result["columns"] = []
133
134 result["columns"].append({
135 "name": column_name,
136 "friendly_name": friendly_name,
137 "type": column_type
138 })
139
140 @staticmethod
141 def add_result_row(result, values):
142 """Helper function to add one row to results set.
143
144 Parameters:
145 :result dict: The result dict
146 :values dict: One row of result in dict. The key should be one of the column names. The value is the value of the column in this row.
147 """
148 if "rows" not in result:
149 result["rows"] = []
150
151 result["rows"].append(values)
152
153 @staticmethod
154 def execute_query(data_source_name_or_id, query):
155 """Run query from specific data source.
156
157 Parameters:
158 :data_source_name_or_id string|integer: Name or ID of the data source
159 :query string: Query to run
160 """
161 try:
162 if type(data_source_name_or_id) == int:
163 data_source = models.DataSource.get_by_id(data_source_name_or_id)
164 else:
165 data_source = models.DataSource.get_by_name(data_source_name_or_id)
166 except models.NoResultFound:
167 raise Exception("Wrong data source name/id: %s." % data_source_name_or_id)
168
169 # TODO: pass the user here...
170 data, error = data_source.query_runner.run_query(query, None)
171 if error is not None:
172 raise Exception(error)
173
174 # TODO: allow avoiding the JSON dumps/loads in same process
175 return json_loads(data)
176
177 @staticmethod
178 def get_source_schema(data_source_name_or_id):
179 """Get schema from specific data source.
180
181 :param data_source_name_or_id: string|integer: Name or ID of the data source
182 :return:
183 """
184 try:
185 if type(data_source_name_or_id) == int:
186 data_source = models.DataSource.get_by_id(data_source_name_or_id)
187 else:
188 data_source = models.DataSource.get_by_name(data_source_name_or_id)
189 except models.NoResultFound:
190 raise Exception("Wrong data source name/id: %s." % data_source_name_or_id)
191 schema = data_source.query_runner.get_schema()
192 return schema
193
194 @staticmethod
195 def get_query_result(query_id):
196 """Get result of an existing query.
197
198 Parameters:
199 :query_id integer: ID of existing query
200 """
201 try:
202 query = models.Query.get_by_id(query_id)
203 except models.NoResultFound:
204 raise Exception("Query id %s does not exist." % query_id)
205
206 if query.latest_query_data is None:
207 raise Exception("Query does not have results yet.")
208
209 if query.latest_query_data.data is None:
210 raise Exception("Query does not have results yet.")
211
212 return json_loads(query.latest_query_data.data)
213
214 def test_connection(self):
215 pass
216
217 def run_query(self, query, user):
218 try:
219 error = None
220
221 code = compile_restricted(query, '<string>', 'exec')
222
223 builtins = safe_builtins.copy()
224 builtins["_write_"] = self.custom_write
225 builtins["__import__"] = self.custom_import
226 builtins["_getattr_"] = getattr
227 builtins["getattr"] = getattr
228 builtins["_setattr_"] = setattr
229 builtins["setattr"] = setattr
230 builtins["_getitem_"] = self.custom_get_item
231 builtins["_getiter_"] = self.custom_get_iter
232 builtins["_print_"] = self._custom_print
233
234 # Layer in our own additional set of builtins that we have
235 # considered safe.
236 for key in self.safe_builtins:
237 builtins[key] = __builtins__[key]
238
239 restricted_globals = dict(__builtins__=builtins)
240 restricted_globals["get_query_result"] = self.get_query_result
241 restricted_globals["get_source_schema"] = self.get_source_schema
242 restricted_globals["execute_query"] = self.execute_query
243 restricted_globals["add_result_column"] = self.add_result_column
244 restricted_globals["add_result_row"] = self.add_result_row
245 restricted_globals["disable_print_log"] = self._custom_print.disable
246 restricted_globals["enable_print_log"] = self._custom_print.enable
247
248 # Supported data types
249 restricted_globals["TYPE_DATETIME"] = TYPE_DATETIME
250 restricted_globals["TYPE_BOOLEAN"] = TYPE_BOOLEAN
251 restricted_globals["TYPE_INTEGER"] = TYPE_INTEGER
252 restricted_globals["TYPE_STRING"] = TYPE_STRING
253 restricted_globals["TYPE_DATE"] = TYPE_DATE
254 restricted_globals["TYPE_FLOAT"] = TYPE_FLOAT
255
256
257 # TODO: Figure out the best way to have a timeout on a script
258 # One option is to use ETA with Celery + timeouts on workers
259 # And replacement of worker process every X requests handled.
260
261 exec((code), restricted_globals, self._script_locals)
262
263 result = self._script_locals['result']
264 result['log'] = self._custom_print.lines
265 json_data = json_dumps(result)
266 except KeyboardInterrupt:
267 error = "Query cancelled by user."
268 json_data = None
269 except Exception as e:
270 error = str(type(e)) + " " + str(e)
271 json_data = None
272
273 return json_data, error
274
275
276 register(Python)
277
[end of redash/query_runner/python.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redash/query_runner/python.py b/redash/query_runner/python.py
--- a/redash/query_runner/python.py
+++ b/redash/query_runner/python.py
@@ -211,10 +211,15 @@
return json_loads(query.latest_query_data.data)
+ def get_current_user(self):
+ return self._current_user.to_dict()
+
def test_connection(self):
pass
def run_query(self, query, user):
+ self._current_user = user
+
try:
error = None
@@ -239,6 +244,7 @@
restricted_globals = dict(__builtins__=builtins)
restricted_globals["get_query_result"] = self.get_query_result
restricted_globals["get_source_schema"] = self.get_source_schema
+ restricted_globals["get_current_user"] = self.get_current_user
restricted_globals["execute_query"] = self.execute_query
restricted_globals["add_result_column"] = self.add_result_column
restricted_globals["add_result_row"] = self.add_result_row
| {"golden_diff": "diff --git a/redash/query_runner/python.py b/redash/query_runner/python.py\n--- a/redash/query_runner/python.py\n+++ b/redash/query_runner/python.py\n@@ -211,10 +211,15 @@\n \n return json_loads(query.latest_query_data.data)\n \n+ def get_current_user(self):\n+ return self._current_user.to_dict()\n+\n def test_connection(self):\n pass\n \n def run_query(self, query, user):\n+ self._current_user = user\n+\n try:\n error = None\n \n@@ -239,6 +244,7 @@\n restricted_globals = dict(__builtins__=builtins)\n restricted_globals[\"get_query_result\"] = self.get_query_result\n restricted_globals[\"get_source_schema\"] = self.get_source_schema\n+ restricted_globals[\"get_current_user\"] = self.get_current_user\n restricted_globals[\"execute_query\"] = self.execute_query\n restricted_globals[\"add_result_column\"] = self.add_result_column\n restricted_globals[\"add_result_row\"] = self.add_result_row\n", "issue": "Access current user id/details from Python data source \nFirst of all, thanks to the authors and contributors for a very interesting and promising tool. \r\n### Question:\r\nIs it possible to access current user id/details from Python data source?\r\nI would like to be able to control access to the viewed data at row level without a need of creating and maintaining multiple (in my case about 80) data sources and dashboards. My desired scenario:\r\n 1. Store A logs into their account and accesses dashboard with KPI/Metrics.\r\n 2. Dashboard calls Python data source(s).\r\n 3. Python code loads data from a db table with all stores, identifies current user and filters out all records where store != Store A.\r\n 4. Dashboard is dynamically loaded with entries for Store A only.\r\n\r\nAlso if there are any other ways to achieve the above, please advise. 
\r\n\r\n### Technical details:\r\n* Redash Version: 0.12.0\n", "before_files": [{"content": "import datetime\nimport importlib\nimport logging\nimport sys\n\nfrom redash.query_runner import *\nfrom redash.utils import json_dumps, json_loads\nfrom redash import models\nfrom RestrictedPython import compile_restricted\nfrom RestrictedPython.Guards import safe_builtins\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CustomPrint(object):\n \"\"\"CustomPrint redirect \"print\" calls to be sent as \"log\" on the result object.\"\"\"\n def __init__(self):\n self.enabled = True\n self.lines = []\n\n def write(self, text):\n if self.enabled:\n if text and text.strip():\n log_line = \"[{0}] {1}\".format(datetime.datetime.utcnow().isoformat(), text)\n self.lines.append(log_line)\n\n def enable(self):\n self.enabled = True\n\n def disable(self):\n self.enabled = False\n\n def __call__(self):\n return self\n\n\nclass Python(BaseQueryRunner):\n safe_builtins = (\n 'sorted', 'reversed', 'map', 'reduce', 'any', 'all',\n 'slice', 'filter', 'len', 'next', 'enumerate',\n 'sum', 'abs', 'min', 'max', 'round', 'cmp', 'divmod',\n 'str', 'unicode', 'int', 'float', 'complex',\n 'tuple', 'set', 'list', 'dict', 'bool',\n )\n\n @classmethod\n def configuration_schema(cls):\n return {\n 'type': 'object',\n 'properties': {\n 'allowedImportModules': {\n 'type': 'string',\n 'title': 'Modules to import prior to running the script'\n },\n 'additionalModulesPaths': {\n 'type': 'string'\n }\n },\n }\n\n @classmethod\n def enabled(cls):\n return True\n\n @classmethod\n def annotate_query(cls):\n return False\n\n def __init__(self, configuration):\n super(Python, self).__init__(configuration)\n\n self.syntax = \"python\"\n\n self._allowed_modules = {}\n self._script_locals = {\"result\": {\"rows\": [], \"columns\": [], \"log\": []}}\n self._enable_print_log = True\n self._custom_print = CustomPrint()\n\n if self.configuration.get(\"allowedImportModules\", None):\n for item in self.configuration[\"allowedImportModules\"].split(\",\"):\n self._allowed_modules[item] = None\n\n if self.configuration.get(\"additionalModulesPaths\", None):\n for p in self.configuration[\"additionalModulesPaths\"].split(\",\"):\n if p not in sys.path:\n sys.path.append(p)\n\n def custom_import(self, name, globals=None, locals=None, fromlist=(), level=0):\n if name in self._allowed_modules:\n m = None\n if self._allowed_modules[name] is None:\n m = importlib.import_module(name)\n self._allowed_modules[name] = m\n else:\n m = self._allowed_modules[name]\n\n return m\n\n raise Exception(\"'{0}' is not configured as a supported import module\".format(name))\n\n @staticmethod\n def custom_write(obj):\n \"\"\"\n Custom hooks which controls the way objects/lists/tuples/dicts behave in\n RestrictedPython\n \"\"\"\n return obj\n\n @staticmethod\n def custom_get_item(obj, key):\n return obj[key]\n\n @staticmethod\n def custom_get_iter(obj):\n return iter(obj)\n\n @staticmethod\n def add_result_column(result, column_name, friendly_name, column_type):\n \"\"\"Helper function to add columns inside a Python script running in Redash in an easier way\n\n Parameters:\n :result dict: The result dict\n :column_name string: Name of the column, which should be consisted of lowercase latin letters or underscore.\n :friendly_name string: Name of the column for display\n :column_type string: Type of the column. 
Check supported data types for details.\n \"\"\"\n if column_type not in SUPPORTED_COLUMN_TYPES:\n raise Exception(\"'{0}' is not a supported column type\".format(column_type))\n\n if \"columns\" not in result:\n result[\"columns\"] = []\n\n result[\"columns\"].append({\n \"name\": column_name,\n \"friendly_name\": friendly_name,\n \"type\": column_type\n })\n\n @staticmethod\n def add_result_row(result, values):\n \"\"\"Helper function to add one row to results set.\n\n Parameters:\n :result dict: The result dict\n :values dict: One row of result in dict. The key should be one of the column names. The value is the value of the column in this row.\n \"\"\"\n if \"rows\" not in result:\n result[\"rows\"] = []\n\n result[\"rows\"].append(values)\n\n @staticmethod\n def execute_query(data_source_name_or_id, query):\n \"\"\"Run query from specific data source.\n\n Parameters:\n :data_source_name_or_id string|integer: Name or ID of the data source\n :query string: Query to run\n \"\"\"\n try:\n if type(data_source_name_or_id) == int:\n data_source = models.DataSource.get_by_id(data_source_name_or_id)\n else:\n data_source = models.DataSource.get_by_name(data_source_name_or_id)\n except models.NoResultFound:\n raise Exception(\"Wrong data source name/id: %s.\" % data_source_name_or_id)\n\n # TODO: pass the user here...\n data, error = data_source.query_runner.run_query(query, None)\n if error is not None:\n raise Exception(error)\n\n # TODO: allow avoiding the JSON dumps/loads in same process\n return json_loads(data)\n\n @staticmethod\n def get_source_schema(data_source_name_or_id):\n \"\"\"Get schema from specific data source.\n\n :param data_source_name_or_id: string|integer: Name or ID of the data source\n :return:\n \"\"\"\n try:\n if type(data_source_name_or_id) == int:\n data_source = models.DataSource.get_by_id(data_source_name_or_id)\n else:\n data_source = models.DataSource.get_by_name(data_source_name_or_id)\n except models.NoResultFound:\n raise Exception(\"Wrong data source name/id: %s.\" % data_source_name_or_id)\n schema = data_source.query_runner.get_schema()\n return schema\n\n @staticmethod\n def get_query_result(query_id):\n \"\"\"Get result of an existing query.\n\n Parameters:\n :query_id integer: ID of existing query\n \"\"\"\n try:\n query = models.Query.get_by_id(query_id)\n except models.NoResultFound:\n raise Exception(\"Query id %s does not exist.\" % query_id)\n\n if query.latest_query_data is None:\n raise Exception(\"Query does not have results yet.\")\n\n if query.latest_query_data.data is None:\n raise Exception(\"Query does not have results yet.\")\n\n return json_loads(query.latest_query_data.data)\n\n def test_connection(self):\n pass\n\n def run_query(self, query, user):\n try:\n error = None\n\n code = compile_restricted(query, '<string>', 'exec')\n\n builtins = safe_builtins.copy()\n builtins[\"_write_\"] = self.custom_write\n builtins[\"__import__\"] = self.custom_import\n builtins[\"_getattr_\"] = getattr\n builtins[\"getattr\"] = getattr\n builtins[\"_setattr_\"] = setattr\n builtins[\"setattr\"] = setattr\n builtins[\"_getitem_\"] = self.custom_get_item\n builtins[\"_getiter_\"] = self.custom_get_iter\n builtins[\"_print_\"] = self._custom_print\n\n # Layer in our own additional set of builtins that we have\n # considered safe.\n for key in self.safe_builtins:\n builtins[key] = __builtins__[key]\n\n restricted_globals = dict(__builtins__=builtins)\n restricted_globals[\"get_query_result\"] = self.get_query_result\n 
restricted_globals[\"get_source_schema\"] = self.get_source_schema\n restricted_globals[\"execute_query\"] = self.execute_query\n restricted_globals[\"add_result_column\"] = self.add_result_column\n restricted_globals[\"add_result_row\"] = self.add_result_row\n restricted_globals[\"disable_print_log\"] = self._custom_print.disable\n restricted_globals[\"enable_print_log\"] = self._custom_print.enable\n\n # Supported data types\n restricted_globals[\"TYPE_DATETIME\"] = TYPE_DATETIME\n restricted_globals[\"TYPE_BOOLEAN\"] = TYPE_BOOLEAN\n restricted_globals[\"TYPE_INTEGER\"] = TYPE_INTEGER\n restricted_globals[\"TYPE_STRING\"] = TYPE_STRING\n restricted_globals[\"TYPE_DATE\"] = TYPE_DATE\n restricted_globals[\"TYPE_FLOAT\"] = TYPE_FLOAT\n\n\n # TODO: Figure out the best way to have a timeout on a script\n # One option is to use ETA with Celery + timeouts on workers\n # And replacement of worker process every X requests handled.\n\n exec((code), restricted_globals, self._script_locals)\n\n result = self._script_locals['result']\n result['log'] = self._custom_print.lines\n json_data = json_dumps(result)\n except KeyboardInterrupt:\n error = \"Query cancelled by user.\"\n json_data = None\n except Exception as e:\n error = str(type(e)) + \" \" + str(e)\n json_data = None\n\n return json_data, error\n\n\nregister(Python)\n", "path": "redash/query_runner/python.py"}]} | 3,500 | 231 |
gh_patches_debug_25801 | rasdani/github-patches | git_diff | akvo__akvo-rsr-3421 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Results framework loads very slowly for projects with lot of indicator dimensions
</issue>
<code>
[start of akvo/rest/views/indicator_dimension.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7
8 from akvo.rsr.models import IndicatorDimension
9
10 from ..serializers import IndicatorDimensionSerializer
11 from ..viewsets import PublicProjectViewSet
12
13
14 class IndicatorDimensionViewSet(PublicProjectViewSet):
15 """
16 """
17 queryset = IndicatorDimension.objects.all()
18 serializer_class = IndicatorDimensionSerializer
19 project_relation = 'indicator__result__project__'
20
[end of akvo/rest/views/indicator_dimension.py]
[start of akvo/rest/pagination.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from rest_framework import pagination
8 from rest_framework.response import Response
9
10
11 class LimitSizePageNumberPagination(pagination.PageNumberPagination):
12 page_size = 30
13 page_size_query_param = 'limit'
14 max_page_size = 100
15
16
17 class TastypieOffsetPagination(pagination.LimitOffsetPagination):
18
19 def get_paginated_response(self, data):
20 """ Emulate the old style Tastypie format if the URL contains /api/v1/
21 """
22 return Response({
23 'meta': {
24 'next': self.get_next_link(),
25 'previous': self.get_previous_link(),
26 'total_count': self.count,
27 'limit': self.limit,
28 'offset': self.offset,
29 },
30 'objects': data
31 })
32
[end of akvo/rest/pagination.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rest/pagination.py b/akvo/rest/pagination.py
--- a/akvo/rest/pagination.py
+++ b/akvo/rest/pagination.py
@@ -8,12 +8,18 @@
from rest_framework.response import Response
-class LimitSizePageNumberPagination(pagination.PageNumberPagination):
+class StandardSizePageNumberPagination(pagination.PageNumberPagination):
page_size = 30
page_size_query_param = 'limit'
max_page_size = 100
+class LargeSizePageNumberPagination(pagination.PageNumberPagination):
+ page_size = 100
+ page_size_query_param = 'limit'
+ max_page_size = 1000
+
+
class TastypieOffsetPagination(pagination.LimitOffsetPagination):
def get_paginated_response(self, data):
diff --git a/akvo/rest/views/indicator_dimension.py b/akvo/rest/views/indicator_dimension.py
--- a/akvo/rest/views/indicator_dimension.py
+++ b/akvo/rest/views/indicator_dimension.py
@@ -6,6 +6,7 @@
from akvo.rsr.models import IndicatorDimension
+from akvo.rest.pagination import LargeSizePageNumberPagination
from ..serializers import IndicatorDimensionSerializer
from ..viewsets import PublicProjectViewSet
@@ -17,3 +18,4 @@
queryset = IndicatorDimension.objects.all()
serializer_class = IndicatorDimensionSerializer
project_relation = 'indicator__result__project__'
+ pagination_class = LargeSizePageNumberPagination
| {"golden_diff": "diff --git a/akvo/rest/pagination.py b/akvo/rest/pagination.py\n--- a/akvo/rest/pagination.py\n+++ b/akvo/rest/pagination.py\n@@ -8,12 +8,18 @@\n from rest_framework.response import Response\n \n \n-class LimitSizePageNumberPagination(pagination.PageNumberPagination):\n+class StandardSizePageNumberPagination(pagination.PageNumberPagination):\n page_size = 30\n page_size_query_param = 'limit'\n max_page_size = 100\n \n \n+class LargeSizePageNumberPagination(pagination.PageNumberPagination):\n+ page_size = 100\n+ page_size_query_param = 'limit'\n+ max_page_size = 1000\n+\n+\n class TastypieOffsetPagination(pagination.LimitOffsetPagination):\n \n def get_paginated_response(self, data):\ndiff --git a/akvo/rest/views/indicator_dimension.py b/akvo/rest/views/indicator_dimension.py\n--- a/akvo/rest/views/indicator_dimension.py\n+++ b/akvo/rest/views/indicator_dimension.py\n@@ -6,6 +6,7 @@\n \n \n from akvo.rsr.models import IndicatorDimension\n+from akvo.rest.pagination import LargeSizePageNumberPagination\n \n from ..serializers import IndicatorDimensionSerializer\n from ..viewsets import PublicProjectViewSet\n@@ -17,3 +18,4 @@\n queryset = IndicatorDimension.objects.all()\n serializer_class = IndicatorDimensionSerializer\n project_relation = 'indicator__result__project__'\n+ pagination_class = LargeSizePageNumberPagination\n", "issue": "Results framework loads very slowly for projects with lot of indicator dimensions\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import IndicatorDimension\n\nfrom ..serializers import IndicatorDimensionSerializer\nfrom ..viewsets import PublicProjectViewSet\n\n\nclass IndicatorDimensionViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorDimension.objects.all()\n serializer_class = IndicatorDimensionSerializer\n project_relation = 'indicator__result__project__'\n", "path": "akvo/rest/views/indicator_dimension.py"}, {"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom rest_framework import pagination\nfrom rest_framework.response import Response\n\n\nclass LimitSizePageNumberPagination(pagination.PageNumberPagination):\n page_size = 30\n page_size_query_param = 'limit'\n max_page_size = 100\n\n\nclass TastypieOffsetPagination(pagination.LimitOffsetPagination):\n\n def get_paginated_response(self, data):\n \"\"\" Emulate the old style Tastypie format if the URL contains /api/v1/\n \"\"\"\n return Response({\n 'meta': {\n 'next': self.get_next_link(),\n 'previous': self.get_previous_link(),\n 'total_count': self.count,\n 'limit': self.limit,\n 'offset': self.offset,\n },\n 'objects': data\n })\n", "path": "akvo/rest/pagination.py"}]} | 1,033 | 336 |
gh_patches_debug_317 | rasdani/github-patches | git_diff | jazzband__pip-tools-1871 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Convert the README from rst to md
<!--- Describe the changes here. --->
This PR converts the documentation from README.rst to README.md
Related: https://github.com/jazzband/pip-tools/issues/1856
##### Contributor checklist
- [ ] Provided the tests for the changes.
- [x] Assure PR title is short, clear, and good to be included in the user-oriented changelog
##### Maintainer checklist
- [ ] Assure one of these labels is present: `backwards incompatible`, `feature`, `enhancement`, `deprecation`, `bug`, `dependency`, `docs` or `skip-changelog` as they determine changelog listing.
- [ ] Assign the PR to an existing or new milestone for the target version (following [Semantic Versioning](https://blog.versioneye.com/2014/01/16/semantic-versioning/)).
</issue>
<code>
[start of docs/conf.py]
1 # https://www.sphinx-doc.org/en/master/usage/configuration.html
2 """Configuration file for the Sphinx documentation builder."""
3
4 from __future__ import annotations
5
6 from functools import partial
7 from pathlib import Path
8
9 from setuptools_scm import get_version
10
11 # -- Path setup --------------------------------------------------------------
12
13 PROJECT_ROOT_DIR = Path(__file__).parents[1].resolve()
14 get_scm_version = partial(get_version, root=PROJECT_ROOT_DIR)
15
16
17 # -- Project information -----------------------------------------------------
18
19 project = "pip-tools"
20 author = f"{project} Contributors"
21 copyright = f"The {author}"
22
23 # The short X.Y version
24 version = ".".join(
25 get_scm_version(
26 local_scheme="no-local-version",
27 ).split(
28 "."
29 )[:3],
30 )
31
32 # The full version, including alpha/beta/rc tags
33 release = get_scm_version()
34
35
36 # -- General configuration ---------------------------------------------------
37
38 # Add any Sphinx extension module names here, as strings. They can be
39 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
40 # ones.
41 extensions = ["myst_parser"]
42
43
44 # -- Options for HTML output -------------------------------------------------
45
46 # The theme to use for HTML and HTML Help pages. See the documentation for
47 # a list of builtin themes.
48 #
49 html_theme = "furo"
50
51
52 # -------------------------------------------------------------------------
53 default_role = "any"
54 nitpicky = True
55
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -52,3 +52,4 @@
# -------------------------------------------------------------------------
default_role = "any"
nitpicky = True
+suppress_warnings = ["myst.xref_missing"]
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -52,3 +52,4 @@\n # -------------------------------------------------------------------------\n default_role = \"any\"\n nitpicky = True\n+suppress_warnings = [\"myst.xref_missing\"]\n", "issue": "Convert the README from rst to md\n<!--- Describe the changes here. --->\r\nThis PR converts the documentation from README.rst to README.md\r\nRelated: https://github.com/jazzband/pip-tools/issues/1856\r\n##### Contributor checklist\r\n\r\n- [ ] Provided the tests for the changes.\r\n- [x] Assure PR title is short, clear, and good to be included in the user-oriented changelog\r\n\r\n##### Maintainer checklist\r\n\r\n- [ ] Assure one of these labels is present: `backwards incompatible`, `feature`, `enhancement`, `deprecation`, `bug`, `dependency`, `docs` or `skip-changelog` as they determine changelog listing.\r\n- [ ] Assign the PR to an existing or new milestone for the target version (following [Semantic Versioning](https://blog.versioneye.com/2014/01/16/semantic-versioning/)).\r\n\n", "before_files": [{"content": "# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\"\"\"Configuration file for the Sphinx documentation builder.\"\"\"\n\nfrom __future__ import annotations\n\nfrom functools import partial\nfrom pathlib import Path\n\nfrom setuptools_scm import get_version\n\n# -- Path setup --------------------------------------------------------------\n\nPROJECT_ROOT_DIR = Path(__file__).parents[1].resolve()\nget_scm_version = partial(get_version, root=PROJECT_ROOT_DIR)\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"pip-tools\"\nauthor = f\"{project} Contributors\"\ncopyright = f\"The {author}\"\n\n# The short X.Y version\nversion = \".\".join(\n get_scm_version(\n local_scheme=\"no-local-version\",\n ).split(\n \".\"\n )[:3],\n)\n\n# The full version, including alpha/beta/rc tags\nrelease = get_scm_version()\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\"myst_parser\"]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"furo\"\n\n\n# -------------------------------------------------------------------------\ndefault_role = \"any\"\nnitpicky = True\n", "path": "docs/conf.py"}]} | 1,115 | 61 |
gh_patches_debug_40399 | rasdani/github-patches | git_diff | SeldonIO__MLServer-233 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support MLflow current protocol
As a follow-up to #167, it would be interesting to explore adding a custom endpoint to the `mlserver-mlflow` runtime which supports [MLflow's existing API](https://www.mlflow.org/docs/latest/models.html#deploy-mlflow-models). This would help reduce friction on user adoption of MLSever, as well as a temporary stopgap for users while they adopt the V2 protocol.
</issue>
<code>
[start of runtimes/mlflow/mlserver_mlflow/runtime.py]
1 import mlflow
2
3 from mlserver.types import InferenceRequest, InferenceResponse
4 from mlserver.model import MLModel
5 from mlserver.utils import get_model_uri
6 from mlserver.codecs import get_decoded_or_raw
7
8 from .encoding import to_outputs
9
10
11 class MLflowRuntime(MLModel):
12 """
13 Implementation of the MLModel interface to load and serve `scikit-learn`
14 models persisted with `joblib`.
15 """
16
17 async def load(self) -> bool:
18 # TODO: Log info message
19 model_uri = await get_model_uri(self._settings)
20 self._model = mlflow.pyfunc.load_model(model_uri)
21
22 self.ready = True
23 return self.ready
24
25 async def predict(self, payload: InferenceRequest) -> InferenceResponse:
26 decoded_payload = get_decoded_or_raw(payload)
27
28 # TODO: Can `output` be a dictionary of tensors?
29 model_output = self._model.predict(decoded_payload)
30
31 return InferenceResponse(
32 model_name=self.name,
33 model_version=self.version,
34 outputs=to_outputs(model_output),
35 )
36
[end of runtimes/mlflow/mlserver_mlflow/runtime.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/runtimes/mlflow/mlserver_mlflow/runtime.py b/runtimes/mlflow/mlserver_mlflow/runtime.py
--- a/runtimes/mlflow/mlserver_mlflow/runtime.py
+++ b/runtimes/mlflow/mlserver_mlflow/runtime.py
@@ -1,9 +1,29 @@
import mlflow
+from io import StringIO
+from fastapi import Request, Response
+
+from mlflow.exceptions import MlflowException
+from mlflow.pyfunc.scoring_server import (
+ CONTENT_TYPES,
+ CONTENT_TYPE_CSV,
+ CONTENT_TYPE_JSON,
+ CONTENT_TYPE_JSON_SPLIT_ORIENTED,
+ CONTENT_TYPE_JSON_RECORDS_ORIENTED,
+ CONTENT_TYPE_JSON_SPLIT_NUMPY,
+ parse_csv_input,
+ infer_and_parse_json_input,
+ parse_json_input,
+ parse_split_oriented_json_input_to_numpy,
+ predictions_to_json,
+)
+
from mlserver.types import InferenceRequest, InferenceResponse
from mlserver.model import MLModel
from mlserver.utils import get_model_uri
from mlserver.codecs import get_decoded_or_raw
+from mlserver.handlers import custom_handler
+from mlserver.errors import InferenceError
from .encoding import to_outputs
@@ -14,10 +34,68 @@
models persisted with `joblib`.
"""
+ # TODO: Decouple from REST
+ @custom_handler(rest_path="/invocations")
+ async def invocations(self, request: Request) -> Response:
+ """
+ This custom handler is meant to mimic the behaviour of the existing
+ scoring server in MLflow.
+ For details about its implementation, please consult the original
+ implementation in the MLflow repository:
+
+ https://github.com/mlflow/mlflow/blob/master/mlflow/pyfunc/scoring_server/__init__.py
+ """
+ content_type = request.headers.get("content-type", None)
+ raw_data = await request.body()
+ as_str = raw_data.decode("utf-8")
+
+ if content_type == CONTENT_TYPE_CSV:
+ csv_input = StringIO(as_str)
+ data = parse_csv_input(csv_input=csv_input)
+ elif content_type == CONTENT_TYPE_JSON:
+ data = infer_and_parse_json_input(as_str, self._input_schema)
+ elif content_type == CONTENT_TYPE_JSON_SPLIT_ORIENTED:
+ data = parse_json_input(
+ json_input=StringIO(as_str),
+ orient="split",
+ schema=self._input_schema,
+ )
+ elif content_type == CONTENT_TYPE_JSON_RECORDS_ORIENTED:
+ data = parse_json_input(
+ json_input=StringIO(as_str),
+ orient="records",
+ schema=self._input_schema,
+ )
+ elif content_type == CONTENT_TYPE_JSON_SPLIT_NUMPY:
+ data = parse_split_oriented_json_input_to_numpy(as_str)
+ else:
+ content_type_error_message = (
+ "This predictor only supports the following content types, "
+ f"{CONTENT_TYPES}. Got '{content_type}'."
+ )
+ raise InferenceError(content_type_error_message)
+
+ try:
+ raw_predictions = self._model.predict(data)
+ except MlflowException as e:
+ raise InferenceError(e.message)
+ except Exception:
+ error_message = (
+ "Encountered an unexpected error while evaluating the model. Verify"
+ " that the serialized input Dataframe is compatible with the model for"
+ " inference."
+ )
+ raise InferenceError(error_message)
+
+ result = StringIO()
+ predictions_to_json(raw_predictions, result)
+ return Response(content=result.getvalue(), media_type="application/json")
+
async def load(self) -> bool:
# TODO: Log info message
model_uri = await get_model_uri(self._settings)
self._model = mlflow.pyfunc.load_model(model_uri)
+ self._input_schema = self._model.metadata.get_input_schema()
self.ready = True
return self.ready
| {"golden_diff": "diff --git a/runtimes/mlflow/mlserver_mlflow/runtime.py b/runtimes/mlflow/mlserver_mlflow/runtime.py\n--- a/runtimes/mlflow/mlserver_mlflow/runtime.py\n+++ b/runtimes/mlflow/mlserver_mlflow/runtime.py\n@@ -1,9 +1,29 @@\n import mlflow\n \n+from io import StringIO\n+from fastapi import Request, Response\n+\n+from mlflow.exceptions import MlflowException\n+from mlflow.pyfunc.scoring_server import (\n+ CONTENT_TYPES,\n+ CONTENT_TYPE_CSV,\n+ CONTENT_TYPE_JSON,\n+ CONTENT_TYPE_JSON_SPLIT_ORIENTED,\n+ CONTENT_TYPE_JSON_RECORDS_ORIENTED,\n+ CONTENT_TYPE_JSON_SPLIT_NUMPY,\n+ parse_csv_input,\n+ infer_and_parse_json_input,\n+ parse_json_input,\n+ parse_split_oriented_json_input_to_numpy,\n+ predictions_to_json,\n+)\n+\n from mlserver.types import InferenceRequest, InferenceResponse\n from mlserver.model import MLModel\n from mlserver.utils import get_model_uri\n from mlserver.codecs import get_decoded_or_raw\n+from mlserver.handlers import custom_handler\n+from mlserver.errors import InferenceError\n \n from .encoding import to_outputs\n \n@@ -14,10 +34,68 @@\n models persisted with `joblib`.\n \"\"\"\n \n+ # TODO: Decouple from REST\n+ @custom_handler(rest_path=\"/invocations\")\n+ async def invocations(self, request: Request) -> Response:\n+ \"\"\"\n+ This custom handler is meant to mimic the behaviour of the existing\n+ scoring server in MLflow.\n+ For details about its implementation, please consult the original\n+ implementation in the MLflow repository:\n+\n+ https://github.com/mlflow/mlflow/blob/master/mlflow/pyfunc/scoring_server/__init__.py\n+ \"\"\"\n+ content_type = request.headers.get(\"content-type\", None)\n+ raw_data = await request.body()\n+ as_str = raw_data.decode(\"utf-8\")\n+\n+ if content_type == CONTENT_TYPE_CSV:\n+ csv_input = StringIO(as_str)\n+ data = parse_csv_input(csv_input=csv_input)\n+ elif content_type == CONTENT_TYPE_JSON:\n+ data = infer_and_parse_json_input(as_str, self._input_schema)\n+ elif content_type == CONTENT_TYPE_JSON_SPLIT_ORIENTED:\n+ data = parse_json_input(\n+ json_input=StringIO(as_str),\n+ orient=\"split\",\n+ schema=self._input_schema,\n+ )\n+ elif content_type == CONTENT_TYPE_JSON_RECORDS_ORIENTED:\n+ data = parse_json_input(\n+ json_input=StringIO(as_str),\n+ orient=\"records\",\n+ schema=self._input_schema,\n+ )\n+ elif content_type == CONTENT_TYPE_JSON_SPLIT_NUMPY:\n+ data = parse_split_oriented_json_input_to_numpy(as_str)\n+ else:\n+ content_type_error_message = (\n+ \"This predictor only supports the following content types, \"\n+ f\"{CONTENT_TYPES}. Got '{content_type}'.\"\n+ )\n+ raise InferenceError(content_type_error_message)\n+\n+ try:\n+ raw_predictions = self._model.predict(data)\n+ except MlflowException as e:\n+ raise InferenceError(e.message)\n+ except Exception:\n+ error_message = (\n+ \"Encountered an unexpected error while evaluating the model. 
Verify\"\n+ \" that the serialized input Dataframe is compatible with the model for\"\n+ \" inference.\"\n+ )\n+ raise InferenceError(error_message)\n+\n+ result = StringIO()\n+ predictions_to_json(raw_predictions, result)\n+ return Response(content=result.getvalue(), media_type=\"application/json\")\n+\n async def load(self) -> bool:\n # TODO: Log info message\n model_uri = await get_model_uri(self._settings)\n self._model = mlflow.pyfunc.load_model(model_uri)\n+ self._input_schema = self._model.metadata.get_input_schema()\n \n self.ready = True\n return self.ready\n", "issue": "Support MLflow current protocol\nAs a follow-up to #167, it would be interesting to explore adding a custom endpoint to the `mlserver-mlflow` runtime which supports [MLflow's existing API](https://www.mlflow.org/docs/latest/models.html#deploy-mlflow-models). This would help reduce friction on user adoption of MLSever, as well as a temporary stopgap for users while they adopt the V2 protocol.\n", "before_files": [{"content": "import mlflow\n\nfrom mlserver.types import InferenceRequest, InferenceResponse\nfrom mlserver.model import MLModel\nfrom mlserver.utils import get_model_uri\nfrom mlserver.codecs import get_decoded_or_raw\n\nfrom .encoding import to_outputs\n\n\nclass MLflowRuntime(MLModel):\n \"\"\"\n Implementation of the MLModel interface to load and serve `scikit-learn`\n models persisted with `joblib`.\n \"\"\"\n\n async def load(self) -> bool:\n # TODO: Log info message\n model_uri = await get_model_uri(self._settings)\n self._model = mlflow.pyfunc.load_model(model_uri)\n\n self.ready = True\n return self.ready\n\n async def predict(self, payload: InferenceRequest) -> InferenceResponse:\n decoded_payload = get_decoded_or_raw(payload)\n\n # TODO: Can `output` be a dictionary of tensors?\n model_output = self._model.predict(decoded_payload)\n\n return InferenceResponse(\n model_name=self.name,\n model_version=self.version,\n outputs=to_outputs(model_output),\n )\n", "path": "runtimes/mlflow/mlserver_mlflow/runtime.py"}]} | 935 | 873 |
gh_patches_debug_38605 | rasdani/github-patches | git_diff | gratipay__gratipay.com-3616 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
add 2.0 payments to history pages
@ehmatthes at https://github.com/gratipay/gratipay.com/issues/3446#issuecomment-103163666:
> I made an account for introtopython.org last week, and it was one of the teams that should have been included in last week's payday. But my balance is low enough that I didn't get a payout.
>
> How will I know if my accounts are set up correctly, ie if my team was included in last week's payday? I don't know how to view my team account, or if it's even possible to view a profile page for that team yet. introtopython is not included in my teams page. My individual history doesn't show anything after payday 152.
#### Notify:
- [x] https://gratipay.freshdesk.com/helpdesk/tickets/2233
- [x] https://gratipay.freshdesk.com/helpdesk/tickets/2358
- [x] https://gratipay.freshdesk.com/helpdesk/tickets/2405
- [x] https://gratipay.freshdesk.com/helpdesk/tickets/2514
- [x] https://gratipay.freshdesk.com/helpdesk/tickets/2468
- [x] https://gratipay.freshdesk.com/helpdesk/tickets/2354
- [x] https://gratipay.freshdesk.com/helpdesk/tickets/2540
- [x] https://gratipay.freshdesk.com/helpdesk/tickets/2450
</issue>
<code>
[start of gratipay/utils/history.py]
1 from datetime import datetime
2 from decimal import Decimal
3
4 from aspen import Response
5 from psycopg2 import IntegrityError
6
7
8 def get_end_of_year_balance(db, participant, year, current_year):
9 if year == current_year:
10 return participant.balance
11 start = participant.claimed_time or participant.ctime
12 if year < start.year:
13 return Decimal('0.00')
14
15 balance = db.one("""
16 SELECT balance
17 FROM balances_at
18 WHERE participant = %s
19 AND "at" = %s
20 """, (participant.id, datetime(year+1, 1, 1)))
21 if balance is not None:
22 return balance
23
24 username = participant.username
25 start_balance = get_end_of_year_balance(db, participant, year-1, current_year)
26 delta = db.one("""
27 SELECT (
28 SELECT COALESCE(sum(amount), 0) AS a
29 FROM exchanges
30 WHERE participant = %(username)s
31 AND extract(year from timestamp) = %(year)s
32 AND amount > 0
33 AND (status is null OR status = 'succeeded')
34 ) + (
35 SELECT COALESCE(sum(amount-fee), 0) AS a
36 FROM exchanges
37 WHERE participant = %(username)s
38 AND extract(year from timestamp) = %(year)s
39 AND amount < 0
40 AND (status is null OR status <> 'failed')
41 ) + (
42 SELECT COALESCE(sum(-amount), 0) AS a
43 FROM transfers
44 WHERE tipper = %(username)s
45 AND extract(year from timestamp) = %(year)s
46 ) + (
47 SELECT COALESCE(sum(amount), 0) AS a
48 FROM transfers
49 WHERE tippee = %(username)s
50 AND extract(year from timestamp) = %(year)s
51 ) AS delta
52 """, locals())
53 balance = start_balance + delta
54 try:
55 db.run("""
56 INSERT INTO balances_at
57 (participant, at, balance)
58 VALUES (%s, %s, %s)
59 """, (participant.id, datetime(year+1, 1, 1), balance))
60 except IntegrityError:
61 pass
62 return balance
63
64
65 def iter_payday_events(db, participant, year=None):
66 """Yields payday events for the given participant.
67 """
68 current_year = datetime.utcnow().year
69 year = year or current_year
70
71 username = participant.username
72 exchanges = db.all("""
73 SELECT *
74 FROM exchanges
75 WHERE participant=%(username)s
76 AND extract(year from timestamp) = %(year)s
77 """, locals(), back_as=dict)
78 transfers = db.all("""
79 SELECT *
80 FROM transfers
81 WHERE (tipper=%(username)s OR tippee=%(username)s)
82 AND extract(year from timestamp) = %(year)s
83 """, locals(), back_as=dict)
84
85 if not (exchanges or transfers):
86 return
87
88 if transfers:
89 yield dict(
90 kind='totals',
91 given=sum(t['amount'] for t in transfers if t['tipper'] == username and t['context'] != 'take'),
92 received=sum(t['amount'] for t in transfers if t['tippee'] == username),
93 )
94
95 payday_dates = db.all("""
96 SELECT ts_start::date
97 FROM paydays
98 ORDER BY ts_start ASC
99 """)
100
101 balance = get_end_of_year_balance(db, participant, year, current_year)
102 prev_date = None
103 get_timestamp = lambda e: e['timestamp']
104 events = sorted(exchanges+transfers, key=get_timestamp, reverse=True)
105 for event in events:
106
107 event['balance'] = balance
108
109 event_date = event['timestamp'].date()
110 if event_date != prev_date:
111 if prev_date:
112 yield dict(kind='day-close', balance=balance)
113 day_open = dict(kind='day-open', date=event_date, balance=balance)
114 if payday_dates:
115 while payday_dates and payday_dates[-1] > event_date:
116 payday_dates.pop()
117 payday_date = payday_dates[-1] if payday_dates else None
118 if event_date == payday_date:
119 day_open['payday_number'] = len(payday_dates) - 1
120 yield day_open
121 prev_date = event_date
122
123 if 'fee' in event:
124 if event['amount'] > 0:
125 kind = 'charge'
126 if event['status'] in (None, 'succeeded'):
127 balance -= event['amount']
128 else:
129 kind = 'credit'
130 if event['status'] != 'failed':
131 balance -= event['amount'] - event['fee']
132 else:
133 kind = 'transfer'
134 if event['tippee'] == username:
135 balance -= event['amount']
136 else:
137 balance += event['amount']
138 event['kind'] = kind
139
140 yield event
141
142 yield dict(kind='day-close', balance=balance)
143
144
145 def export_history(participant, year, mode, key, back_as='namedtuple', require_key=False):
146 db = participant.db
147 params = dict(username=participant.username, year=year)
148 out = {}
149 if mode == 'aggregate':
150 out['given'] = lambda: db.all("""
151 SELECT tippee, sum(amount) AS amount
152 FROM transfers
153 WHERE tipper = %(username)s
154 AND extract(year from timestamp) = %(year)s
155 GROUP BY tippee
156 """, params, back_as=back_as)
157 out['taken'] = lambda: db.all("""
158 SELECT tipper AS team, sum(amount) AS amount
159 FROM transfers
160 WHERE tippee = %(username)s
161 AND context = 'take'
162 AND extract(year from timestamp) = %(year)s
163 GROUP BY tipper
164 """, params, back_as=back_as)
165 else:
166 out['exchanges'] = lambda: db.all("""
167 SELECT timestamp, amount, fee, status, note
168 FROM exchanges
169 WHERE participant = %(username)s
170 AND extract(year from timestamp) = %(year)s
171 ORDER BY timestamp ASC
172 """, params, back_as=back_as)
173 out['given'] = lambda: db.all("""
174 SELECT timestamp, tippee, amount, context
175 FROM transfers
176 WHERE tipper = %(username)s
177 AND extract(year from timestamp) = %(year)s
178 ORDER BY timestamp ASC
179 """, params, back_as=back_as)
180 out['taken'] = lambda: db.all("""
181 SELECT timestamp, tipper AS team, amount
182 FROM transfers
183 WHERE tippee = %(username)s
184 AND context = 'take'
185 AND extract(year from timestamp) = %(year)s
186 ORDER BY timestamp ASC
187 """, params, back_as=back_as)
188 out['received'] = lambda: db.all("""
189 SELECT timestamp, amount, context
190 FROM transfers
191 WHERE tippee = %(username)s
192 AND context NOT IN ('take', 'take-over')
193 AND extract(year from timestamp) = %(year)s
194 ORDER BY timestamp ASC
195 """, params, back_as=back_as)
196
197 if key:
198 try:
199 return out[key]()
200 except KeyError:
201 raise Response(400, "bad key `%s`" % key)
202 elif require_key:
203 raise Response(400, "missing `key` parameter")
204 else:
205 return {k: v() for k, v in out.items()}
206
[end of gratipay/utils/history.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gratipay/utils/history.py b/gratipay/utils/history.py
--- a/gratipay/utils/history.py
+++ b/gratipay/utils/history.py
@@ -75,6 +75,12 @@
WHERE participant=%(username)s
AND extract(year from timestamp) = %(year)s
""", locals(), back_as=dict)
+ payments = db.all("""
+ SELECT *
+ FROM payments
+ WHERE participant=%(username)s
+ AND extract(year from timestamp) = %(year)s
+ """, locals(), back_as=dict)
transfers = db.all("""
SELECT *
FROM transfers
@@ -82,15 +88,20 @@
AND extract(year from timestamp) = %(year)s
""", locals(), back_as=dict)
- if not (exchanges or transfers):
+ if not (exchanges or payments or transfers):
return
- if transfers:
- yield dict(
- kind='totals',
- given=sum(t['amount'] for t in transfers if t['tipper'] == username and t['context'] != 'take'),
- received=sum(t['amount'] for t in transfers if t['tippee'] == username),
- )
+ if payments or transfers:
+ payments_given = sum([p['amount'] for p in payments if p['direction'] == 'to-team'])
+ payments_received = sum([p['amount'] for p in payments \
+ if p['direction'] == 'to-participant'])
+ transfers_given = sum(t['amount'] for t in transfers \
+ if t['tipper'] == username and t['context'] != 'take')
+ transfers_received = sum(t['amount'] for t in transfers if t['tippee'] == username)
+ yield dict( kind='totals'
+ , given=payments_given + transfers_given
+ , received=payments_received + transfers_received
+ )
payday_dates = db.all("""
SELECT ts_start::date
@@ -101,7 +112,7 @@
balance = get_end_of_year_balance(db, participant, year, current_year)
prev_date = None
get_timestamp = lambda e: e['timestamp']
- events = sorted(exchanges+transfers, key=get_timestamp, reverse=True)
+ events = sorted(exchanges+payments+transfers, key=get_timestamp, reverse=True)
for event in events:
event['balance'] = balance
@@ -129,6 +140,13 @@
kind = 'credit'
if event['status'] != 'failed':
balance -= event['amount'] - event['fee']
+ elif 'direction' in event:
+ kind = 'payment'
+ if event['direction'] == 'to-participant':
+ balance -= event['amount']
+ else:
+ assert event['direction'] == 'to-team'
+ balance += event['amount']
else:
kind = 'transfer'
if event['tippee'] == username:
| {"golden_diff": "diff --git a/gratipay/utils/history.py b/gratipay/utils/history.py\n--- a/gratipay/utils/history.py\n+++ b/gratipay/utils/history.py\n@@ -75,6 +75,12 @@\n WHERE participant=%(username)s\n AND extract(year from timestamp) = %(year)s\n \"\"\", locals(), back_as=dict)\n+ payments = db.all(\"\"\"\n+ SELECT *\n+ FROM payments\n+ WHERE participant=%(username)s\n+ AND extract(year from timestamp) = %(year)s\n+ \"\"\", locals(), back_as=dict)\n transfers = db.all(\"\"\"\n SELECT *\n FROM transfers\n@@ -82,15 +88,20 @@\n AND extract(year from timestamp) = %(year)s\n \"\"\", locals(), back_as=dict)\n \n- if not (exchanges or transfers):\n+ if not (exchanges or payments or transfers):\n return\n \n- if transfers:\n- yield dict(\n- kind='totals',\n- given=sum(t['amount'] for t in transfers if t['tipper'] == username and t['context'] != 'take'),\n- received=sum(t['amount'] for t in transfers if t['tippee'] == username),\n- )\n+ if payments or transfers:\n+ payments_given = sum([p['amount'] for p in payments if p['direction'] == 'to-team'])\n+ payments_received = sum([p['amount'] for p in payments \\\n+ if p['direction'] == 'to-participant'])\n+ transfers_given = sum(t['amount'] for t in transfers \\\n+ if t['tipper'] == username and t['context'] != 'take')\n+ transfers_received = sum(t['amount'] for t in transfers if t['tippee'] == username)\n+ yield dict( kind='totals'\n+ , given=payments_given + transfers_given\n+ , received=payments_received + transfers_received\n+ )\n \n payday_dates = db.all(\"\"\"\n SELECT ts_start::date\n@@ -101,7 +112,7 @@\n balance = get_end_of_year_balance(db, participant, year, current_year)\n prev_date = None\n get_timestamp = lambda e: e['timestamp']\n- events = sorted(exchanges+transfers, key=get_timestamp, reverse=True)\n+ events = sorted(exchanges+payments+transfers, key=get_timestamp, reverse=True)\n for event in events:\n \n event['balance'] = balance\n@@ -129,6 +140,13 @@\n kind = 'credit'\n if event['status'] != 'failed':\n balance -= event['amount'] - event['fee']\n+ elif 'direction' in event:\n+ kind = 'payment'\n+ if event['direction'] == 'to-participant':\n+ balance -= event['amount']\n+ else:\n+ assert event['direction'] == 'to-team'\n+ balance += event['amount']\n else:\n kind = 'transfer'\n if event['tippee'] == username:\n", "issue": "add 2.0 payments to history pages\n@ehmatthes at https://github.com/gratipay/gratipay.com/issues/3446#issuecomment-103163666:\n\n> I made an account for introtopython.org last week, and it was one of the teams that should have been included in last week's payday. But my balance is low enough that I didn't get a payout.\n> \n> How will I know if my accounts are set up correctly, ie if my team was included in last week's payday? I don't know how to view my team account, or if it's even possible to view a profile page for that team yet. introtopython is not included in my teams page. 
My individual history doesn't show anything after payday 152.\n#### Notify:\n- [x] https://gratipay.freshdesk.com/helpdesk/tickets/2233\n- [x] https://gratipay.freshdesk.com/helpdesk/tickets/2358\n- [x] https://gratipay.freshdesk.com/helpdesk/tickets/2405\n- [x] https://gratipay.freshdesk.com/helpdesk/tickets/2514\n- [x] https://gratipay.freshdesk.com/helpdesk/tickets/2468\n- [x] https://gratipay.freshdesk.com/helpdesk/tickets/2354\n- [x] https://gratipay.freshdesk.com/helpdesk/tickets/2540\n- [x] https://gratipay.freshdesk.com/helpdesk/tickets/2450\n\n", "before_files": [{"content": "from datetime import datetime\nfrom decimal import Decimal\n\nfrom aspen import Response\nfrom psycopg2 import IntegrityError\n\n\ndef get_end_of_year_balance(db, participant, year, current_year):\n if year == current_year:\n return participant.balance\n start = participant.claimed_time or participant.ctime\n if year < start.year:\n return Decimal('0.00')\n\n balance = db.one(\"\"\"\n SELECT balance\n FROM balances_at\n WHERE participant = %s\n AND \"at\" = %s\n \"\"\", (participant.id, datetime(year+1, 1, 1)))\n if balance is not None:\n return balance\n\n username = participant.username\n start_balance = get_end_of_year_balance(db, participant, year-1, current_year)\n delta = db.one(\"\"\"\n SELECT (\n SELECT COALESCE(sum(amount), 0) AS a\n FROM exchanges\n WHERE participant = %(username)s\n AND extract(year from timestamp) = %(year)s\n AND amount > 0\n AND (status is null OR status = 'succeeded')\n ) + (\n SELECT COALESCE(sum(amount-fee), 0) AS a\n FROM exchanges\n WHERE participant = %(username)s\n AND extract(year from timestamp) = %(year)s\n AND amount < 0\n AND (status is null OR status <> 'failed')\n ) + (\n SELECT COALESCE(sum(-amount), 0) AS a\n FROM transfers\n WHERE tipper = %(username)s\n AND extract(year from timestamp) = %(year)s\n ) + (\n SELECT COALESCE(sum(amount), 0) AS a\n FROM transfers\n WHERE tippee = %(username)s\n AND extract(year from timestamp) = %(year)s\n ) AS delta\n \"\"\", locals())\n balance = start_balance + delta\n try:\n db.run(\"\"\"\n INSERT INTO balances_at\n (participant, at, balance)\n VALUES (%s, %s, %s)\n \"\"\", (participant.id, datetime(year+1, 1, 1), balance))\n except IntegrityError:\n pass\n return balance\n\n\ndef iter_payday_events(db, participant, year=None):\n \"\"\"Yields payday events for the given participant.\n \"\"\"\n current_year = datetime.utcnow().year\n year = year or current_year\n\n username = participant.username\n exchanges = db.all(\"\"\"\n SELECT *\n FROM exchanges\n WHERE participant=%(username)s\n AND extract(year from timestamp) = %(year)s\n \"\"\", locals(), back_as=dict)\n transfers = db.all(\"\"\"\n SELECT *\n FROM transfers\n WHERE (tipper=%(username)s OR tippee=%(username)s)\n AND extract(year from timestamp) = %(year)s\n \"\"\", locals(), back_as=dict)\n\n if not (exchanges or transfers):\n return\n\n if transfers:\n yield dict(\n kind='totals',\n given=sum(t['amount'] for t in transfers if t['tipper'] == username and t['context'] != 'take'),\n received=sum(t['amount'] for t in transfers if t['tippee'] == username),\n )\n\n payday_dates = db.all(\"\"\"\n SELECT ts_start::date\n FROM paydays\n ORDER BY ts_start ASC\n \"\"\")\n\n balance = get_end_of_year_balance(db, participant, year, current_year)\n prev_date = None\n get_timestamp = lambda e: e['timestamp']\n events = sorted(exchanges+transfers, key=get_timestamp, reverse=True)\n for event in events:\n\n event['balance'] = balance\n\n event_date = event['timestamp'].date()\n if 
event_date != prev_date:\n if prev_date:\n yield dict(kind='day-close', balance=balance)\n day_open = dict(kind='day-open', date=event_date, balance=balance)\n if payday_dates:\n while payday_dates and payday_dates[-1] > event_date:\n payday_dates.pop()\n payday_date = payday_dates[-1] if payday_dates else None\n if event_date == payday_date:\n day_open['payday_number'] = len(payday_dates) - 1\n yield day_open\n prev_date = event_date\n\n if 'fee' in event:\n if event['amount'] > 0:\n kind = 'charge'\n if event['status'] in (None, 'succeeded'):\n balance -= event['amount']\n else:\n kind = 'credit'\n if event['status'] != 'failed':\n balance -= event['amount'] - event['fee']\n else:\n kind = 'transfer'\n if event['tippee'] == username:\n balance -= event['amount']\n else:\n balance += event['amount']\n event['kind'] = kind\n\n yield event\n\n yield dict(kind='day-close', balance=balance)\n\n\ndef export_history(participant, year, mode, key, back_as='namedtuple', require_key=False):\n db = participant.db\n params = dict(username=participant.username, year=year)\n out = {}\n if mode == 'aggregate':\n out['given'] = lambda: db.all(\"\"\"\n SELECT tippee, sum(amount) AS amount\n FROM transfers\n WHERE tipper = %(username)s\n AND extract(year from timestamp) = %(year)s\n GROUP BY tippee\n \"\"\", params, back_as=back_as)\n out['taken'] = lambda: db.all(\"\"\"\n SELECT tipper AS team, sum(amount) AS amount\n FROM transfers\n WHERE tippee = %(username)s\n AND context = 'take'\n AND extract(year from timestamp) = %(year)s\n GROUP BY tipper\n \"\"\", params, back_as=back_as)\n else:\n out['exchanges'] = lambda: db.all(\"\"\"\n SELECT timestamp, amount, fee, status, note\n FROM exchanges\n WHERE participant = %(username)s\n AND extract(year from timestamp) = %(year)s\n ORDER BY timestamp ASC\n \"\"\", params, back_as=back_as)\n out['given'] = lambda: db.all(\"\"\"\n SELECT timestamp, tippee, amount, context\n FROM transfers\n WHERE tipper = %(username)s\n AND extract(year from timestamp) = %(year)s\n ORDER BY timestamp ASC\n \"\"\", params, back_as=back_as)\n out['taken'] = lambda: db.all(\"\"\"\n SELECT timestamp, tipper AS team, amount\n FROM transfers\n WHERE tippee = %(username)s\n AND context = 'take'\n AND extract(year from timestamp) = %(year)s\n ORDER BY timestamp ASC\n \"\"\", params, back_as=back_as)\n out['received'] = lambda: db.all(\"\"\"\n SELECT timestamp, amount, context\n FROM transfers\n WHERE tippee = %(username)s\n AND context NOT IN ('take', 'take-over')\n AND extract(year from timestamp) = %(year)s\n ORDER BY timestamp ASC\n \"\"\", params, back_as=back_as)\n\n if key:\n try:\n return out[key]()\n except KeyError:\n raise Response(400, \"bad key `%s`\" % key)\n elif require_key:\n raise Response(400, \"missing `key` parameter\")\n else:\n return {k: v() for k, v in out.items()}\n", "path": "gratipay/utils/history.py"}]} | 2,997 | 660 |
gh_patches_debug_20942 | rasdani/github-patches | git_diff | Project-MONAI__MONAI-3873 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Enhance `ContrastiveLoss` to avoid warning
Calling `ContrastiveLoss` produces a warning message:
```
To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
```
Simple code to reproduce this issue:
```
from monai.losses import ContrastiveLoss
import torch
inp = torch.randn([2, 10])
target = torch.randn([2, 10])
loss = ContrastiveLoss(batch_size=2)
loss(inp, target)
```
</issue>
<code>
[start of monai/losses/contrastive.py]
1 # Copyright (c) MONAI Consortium
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 # http://www.apache.org/licenses/LICENSE-2.0
6 # Unless required by applicable law or agreed to in writing, software
7 # distributed under the License is distributed on an "AS IS" BASIS,
8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 # See the License for the specific language governing permissions and
10 # limitations under the License.
11
12 import torch
13 from torch.nn import functional as F
14 from torch.nn.modules.loss import _Loss
15
16 from monai.utils import deprecated_arg
17
18
19 class ContrastiveLoss(_Loss):
20
21 """
22 Compute the Contrastive loss defined in:
23
24 Chen, Ting, et al. "A simple framework for contrastive learning of visual representations." International
25 conference on machine learning. PMLR, 2020. (http://proceedings.mlr.press/v119/chen20j.html)
26
27 Adapted from:
28 https://github.com/Sara-Ahmed/SiT/blob/1aacd6adcd39b71efc903d16b4e9095b97dda76f/losses.py#L5
29
30 """
31
32 @deprecated_arg(name="reduction", since="0.8", msg_suffix="`reduction` is no longer supported.")
33 def __init__(self, temperature: float = 0.5, batch_size: int = 1, reduction="sum") -> None:
34 """
35 Args:
36 temperature: Can be scaled between 0 and 1 for learning from negative samples, ideally set to 0.5.
37 batch_size: The number of samples.
38
39 Raises:
40 ValueError: When an input of dimension length > 2 is passed
41 ValueError: When input and target are of different shapes
42
43 .. deprecated:: 0.8.0
44
45 `reduction` is no longer supported.
46
47 """
48 super().__init__()
49
50 self.batch_size = batch_size
51 self.temperature = temperature
52
53 def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
54 """
55 Args:
56 input: the shape should be B[F].
57 target: the shape should be B[F].
58 """
59 if len(target.shape) > 2 or len(input.shape) > 2:
60 raise ValueError(
61 f"Either target or input has dimensions greater than 2 where target "
62 f"shape is ({target.shape}) and input shape is ({input.shape})"
63 )
64
65 if target.shape != input.shape:
66 raise ValueError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})")
67
68 temperature_tensor = torch.tensor(self.temperature).to(input.device)
69
70 norm_i = F.normalize(input, dim=1)
71 norm_j = F.normalize(target, dim=1)
72
73 negatives_mask = ~torch.eye(self.batch_size * 2, self.batch_size * 2, dtype=torch.bool)
74 negatives_mask = torch.tensor(negatives_mask, dtype=torch.float)
75 negatives_mask = torch.clone(torch.as_tensor(negatives_mask)).to(input.device)
76
77 repr = torch.cat([norm_i, norm_j], dim=0)
78 sim_matrix = F.cosine_similarity(repr.unsqueeze(1), repr.unsqueeze(0), dim=2)
79 sim_ij = torch.diag(sim_matrix, self.batch_size)
80 sim_ji = torch.diag(sim_matrix, -self.batch_size)
81
82 positives = torch.cat([sim_ij, sim_ji], dim=0)
83 nominator = torch.exp(positives / temperature_tensor)
84 denominator = negatives_mask * torch.exp(sim_matrix / temperature_tensor)
85
86 loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1))
87
88 return torch.sum(loss_partial) / (2 * self.batch_size)
89
[end of monai/losses/contrastive.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/monai/losses/contrastive.py b/monai/losses/contrastive.py
--- a/monai/losses/contrastive.py
+++ b/monai/losses/contrastive.py
@@ -65,14 +65,13 @@
if target.shape != input.shape:
raise ValueError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})")
- temperature_tensor = torch.tensor(self.temperature).to(input.device)
+ temperature_tensor = torch.as_tensor(self.temperature).to(input.device)
norm_i = F.normalize(input, dim=1)
norm_j = F.normalize(target, dim=1)
negatives_mask = ~torch.eye(self.batch_size * 2, self.batch_size * 2, dtype=torch.bool)
- negatives_mask = torch.tensor(negatives_mask, dtype=torch.float)
- negatives_mask = torch.clone(torch.as_tensor(negatives_mask)).to(input.device)
+ negatives_mask = torch.clone(negatives_mask.type(torch.float)).to(input.device)
repr = torch.cat([norm_i, norm_j], dim=0)
sim_matrix = F.cosine_similarity(repr.unsqueeze(1), repr.unsqueeze(0), dim=2)
| {"golden_diff": "diff --git a/monai/losses/contrastive.py b/monai/losses/contrastive.py\n--- a/monai/losses/contrastive.py\n+++ b/monai/losses/contrastive.py\n@@ -65,14 +65,13 @@\n if target.shape != input.shape:\n raise ValueError(f\"ground truth has differing shape ({target.shape}) from input ({input.shape})\")\n \n- temperature_tensor = torch.tensor(self.temperature).to(input.device)\n+ temperature_tensor = torch.as_tensor(self.temperature).to(input.device)\n \n norm_i = F.normalize(input, dim=1)\n norm_j = F.normalize(target, dim=1)\n \n negatives_mask = ~torch.eye(self.batch_size * 2, self.batch_size * 2, dtype=torch.bool)\n- negatives_mask = torch.tensor(negatives_mask, dtype=torch.float)\n- negatives_mask = torch.clone(torch.as_tensor(negatives_mask)).to(input.device)\n+ negatives_mask = torch.clone(negatives_mask.type(torch.float)).to(input.device)\n \n repr = torch.cat([norm_i, norm_j], dim=0)\n sim_matrix = F.cosine_similarity(repr.unsqueeze(1), repr.unsqueeze(0), dim=2)\n", "issue": "Enhance `ContrastiveLoss` to avoid warning\nCall ContrastiveLoss will see a warning message:\r\n```\r\nTo copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\r\n```\r\n\r\nSimple code to reproduce this issue:\r\n```\r\nfrom monai.losses import ContrastiveLoss\r\nimport torch\r\n\r\ninp = torch.randn([2, 10])\r\ntarget = torch.randn([2, 10])\r\nloss = ContrastiveLoss(batch_size=2)\r\nloss(inp, target)\r\n```\n", "before_files": [{"content": "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nfrom torch.nn import functional as F\nfrom torch.nn.modules.loss import _Loss\n\nfrom monai.utils import deprecated_arg\n\n\nclass ContrastiveLoss(_Loss):\n\n \"\"\"\n Compute the Contrastive loss defined in:\n\n Chen, Ting, et al. \"A simple framework for contrastive learning of visual representations.\" International\n conference on machine learning. PMLR, 2020. (http://proceedings.mlr.press/v119/chen20j.html)\n\n Adapted from:\n https://github.com/Sara-Ahmed/SiT/blob/1aacd6adcd39b71efc903d16b4e9095b97dda76f/losses.py#L5\n\n \"\"\"\n\n @deprecated_arg(name=\"reduction\", since=\"0.8\", msg_suffix=\"`reduction` is no longer supported.\")\n def __init__(self, temperature: float = 0.5, batch_size: int = 1, reduction=\"sum\") -> None:\n \"\"\"\n Args:\n temperature: Can be scaled between 0 and 1 for learning from negative samples, ideally set to 0.5.\n batch_size: The number of samples.\n\n Raises:\n ValueError: When an input of dimension length > 2 is passed\n ValueError: When input and target are of different shapes\n\n .. 
deprecated:: 0.8.0\n\n `reduction` is no longer supported.\n\n \"\"\"\n super().__init__()\n\n self.batch_size = batch_size\n self.temperature = temperature\n\n def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Args:\n input: the shape should be B[F].\n target: the shape should be B[F].\n \"\"\"\n if len(target.shape) > 2 or len(input.shape) > 2:\n raise ValueError(\n f\"Either target or input has dimensions greater than 2 where target \"\n f\"shape is ({target.shape}) and input shape is ({input.shape})\"\n )\n\n if target.shape != input.shape:\n raise ValueError(f\"ground truth has differing shape ({target.shape}) from input ({input.shape})\")\n\n temperature_tensor = torch.tensor(self.temperature).to(input.device)\n\n norm_i = F.normalize(input, dim=1)\n norm_j = F.normalize(target, dim=1)\n\n negatives_mask = ~torch.eye(self.batch_size * 2, self.batch_size * 2, dtype=torch.bool)\n negatives_mask = torch.tensor(negatives_mask, dtype=torch.float)\n negatives_mask = torch.clone(torch.as_tensor(negatives_mask)).to(input.device)\n\n repr = torch.cat([norm_i, norm_j], dim=0)\n sim_matrix = F.cosine_similarity(repr.unsqueeze(1), repr.unsqueeze(0), dim=2)\n sim_ij = torch.diag(sim_matrix, self.batch_size)\n sim_ji = torch.diag(sim_matrix, -self.batch_size)\n\n positives = torch.cat([sim_ij, sim_ji], dim=0)\n nominator = torch.exp(positives / temperature_tensor)\n denominator = negatives_mask * torch.exp(sim_matrix / temperature_tensor)\n\n loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1))\n\n return torch.sum(loss_partial) / (2 * self.batch_size)\n", "path": "monai/losses/contrastive.py"}]} | 1,694 | 270 |
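The fix above comes down to not re-wrapping an existing tensor in `torch.tensor()`. A minimal sketch of the quiet alternatives, with illustrative variable names that are not from the MONAI source:

```python
import torch

mask = ~torch.eye(4, 4, dtype=torch.bool)

# torch.tensor(mask, dtype=torch.float) would copy-construct and trigger the warning.
as_float = mask.type(torch.float)         # cast instead of re-wrapping (what the patch does)
scalar = torch.as_tensor(0.5)             # as_tensor also avoids the warning for plain scalars
explicit = mask.clone().detach().float()  # the pattern the warning message itself recommends
```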
gh_patches_debug_41944 | rasdani/github-patches | git_diff | wagtail__wagtail-11666 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Inconsistent use of model verbose_name in permissions UI
### Issue Summary
The object permissions UI doesn't show a model's [`verbose_name`](https://docs.djangoproject.com/en/4.2/ref/models/options/#verbose-name) if it differs from the model's name when it was created.
### Steps to Reproduce
Start a new project with `wagtail start myproject` and create a simple model like the following:
```py
from django.db import models
from wagtail.snippets.models import register_snippet
@register_snippet
class MyModel(models.Model):
text = models.TextField()
```
In the group edit view, permissions render like this; note "My model" and "Can view":

Now, give the model a different `verbose_name`:
```py
@register_snippet
class MyModel(models.Model):
text = models.TextField()
class Meta:
verbose_name = "Something else"
```
Now, the permissions render in a surprising way; note "Something else" and "Can view my model":

As a user, I would expect to see "Can view" the way all other objects are typically listed.
This behavior seems to come from this line that defines the permission "name":
https://github.com/wagtail/wagtail/blob/a8bc03dd8aad9d15fd4d8e84ace1bc89cbe96053/wagtail/users/templatetags/wagtailusers_tags.py#L101-L103
This code assumes that a model's content type's name (`perm.content_type.name`) will exactly match the name on the permission object (`perm.name`). This is true as long as the model's verbose name hasn't been changed, but, [per the docs](https://docs.djangoproject.com/en/4.2/ref/contrib/contenttypes/#django.contrib.contenttypes.models.ContentType.name), `content_type.name` "is taken from the `verbose_name` attribute of the model". So if a model's verbose name is changed, that change will show up when you look at its content type.
The issue is that when a model is first created, its automatically-generated permissions are given names that contain the model's name at that time. For example, the above `MyModel` class generated permissions like this:
```
110|28|add_mymodel|Can add my model
111|28|change_mymodel|Can change my model
112|28|delete_mymodel|Can delete my model
113|28|view_mymodel|Can view my model
```
In the above example, the code assumes that since the content type reports its name as `Something else`, the permission's name will be `Can view something else`, but this isn't a valid assumption. The permission's name is still what it was when it was created, `Can view my model`.
To fix this, the code could lookup the model objects corresponding to the permission content types and use those to do the substitution, maybe something like this:
```diff
diff --git a/wagtail/users/templatetags/wagtailusers_tags.py b/wagtail/users/templatetags/wagtailusers_tags.py
index c188425ad0..3da81fe0dd 100644
--- a/wagtail/users/templatetags/wagtailusers_tags.py
+++ b/wagtail/users/templatetags/wagtailusers_tags.py
@@ -95,11 +95,15 @@ def format_permissions(permission_bound_field):
}
else:
extra_perms_exist["custom"] = True
+ perm_model_class = perm.content_type.model_class()
custom_perms.append(
{
"perm": perm,
"name": re.sub(
- f"{perm.content_type.name}$", "", perm.name, flags=re.I
+ f"{perm_model_class._meta.model_name}$", "", perm.name, flags=re.I
).strip(),
"selected": checkbox.data["selected"],
}
```
### Technical details
- I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: yes
- Python version: 3.11.1
- Django version: 4.0.10
- Wagtail version: 5.2a0
</issue>
<code>
[start of wagtail/users/templatetags/wagtailusers_tags.py]
1 import itertools
2 from collections import defaultdict
3
4 from django import template
5
6 from wagtail import hooks
7 from wagtail.users.permission_order import CONTENT_TYPE_ORDER
8
9 register = template.Library()
10
11
12 @register.inclusion_tag("wagtailusers/groups/includes/formatted_permissions.html")
13 def format_permissions(permission_bound_field):
14 """
15 Given a bound field with a queryset of Permission objects - which must be using
16 the CheckboxSelectMultiple widget - construct a list of dictionaries for 'objects':
17
18 'objects': [
19 {
20 'object': name_of_some_content_object,
21 'add': checkbox,
22 'change': checkbox,
23 'delete': checkbox,
24 'publish': checkbox, # only if the model extends DraftStateMixin
25 'custom': list_of_checkboxes_for_custom_permissions
26 },
27 ]
28
29 and a list of other permissions:
30
31 'others': [
32 (any_non_add_change_delete_permission, checkbox),
33 ]
34
35 (where 'checkbox' is an object with a tag() method that renders the checkbox as HTML;
36 this is a BoundWidget on Django >=1.11)
37
38 - and returns a table template formatted with this list.
39
40 """
41 permissions = permission_bound_field.field._queryset
42 # get a distinct and ordered list of the content types that these permissions relate to.
43 # relies on Permission model default ordering, dict.fromkeys() retaining that order
44 # from the queryset, and the stability of sorted().
45 content_type_ids = sorted(
46 dict.fromkeys(permissions.values_list("content_type_id", flat=True)),
47 key=lambda ct: CONTENT_TYPE_ORDER.get(ct, float("inf")),
48 )
49
50 # iterate over permission_bound_field to build a lookup of individual renderable
51 # checkbox objects
52 # checkbox.data['value'] gives a ModelChoiceIteratorValue
53 checkboxes_by_id = {
54 int(checkbox.data["value"].value): checkbox
55 for checkbox in permission_bound_field
56 }
57
58 object_perms = []
59 other_perms = []
60
61 # Permissions that are known by Wagtail, to be shown under their own columns.
62 # Other permissions will be shown under the "custom permissions" column.
63 main_permission_names = ["add", "change", "delete", "publish", "lock", "unlock"]
64
65 # Only show the columns for these permissions if any of the model has them.
66 extra_perms_exist = {
67 "publish": False,
68 "lock": False,
69 "unlock": False,
70 "custom": False,
71 }
72 # Batch the permission query for all content types, then group by content type
73 # (instead of querying permissions for each content type separately)
74 content_perms_by_ct_id = defaultdict(list)
75 permissions = permissions.filter(content_type_id__in=content_type_ids)
76 for permission in permissions:
77 content_perms_by_ct_id[permission.content_type_id].append(permission)
78
79 # Iterate using the sorted content_type_ids
80 for ct_id in content_type_ids:
81 content_perms = content_perms_by_ct_id[ct_id]
82 content_perms_dict = {}
83 custom_perms = []
84
85 if content_perms[0].content_type.name == "admin":
86 perm = content_perms[0]
87 other_perms.append((perm, checkboxes_by_id[perm.id]))
88 continue
89
90 for perm in content_perms:
91 content_perms_dict["object"] = perm.content_type.name
92 checkbox = checkboxes_by_id[perm.id]
93 # identify the main categories of permission, and assign to
94 # the relevant dict key, else bung in the 'custom_perms' list
95 permission_action = perm.codename.split("_", maxsplit=1)
96 permission_action = permission_action[permission_action[0].lower() == "can"]
97 permission_action = permission_action.rsplit(maxsplit=1)[0]
98
99 if permission_action in main_permission_names:
100 if permission_action in extra_perms_exist:
101 extra_perms_exist[permission_action] = True
102 content_perms_dict[permission_action] = {
103 "perm": perm,
104 "checkbox": checkbox,
105 }
106 else:
107 extra_perms_exist["custom"] = True
108 custom_perms.append(
109 {
110 "perm": perm,
111 "name": f"Can {permission_action}",
112 "selected": checkbox.data["selected"],
113 }
114 )
115
116 content_perms_dict["custom"] = custom_perms
117 object_perms.append(content_perms_dict)
118 return {
119 "object_perms": object_perms,
120 "other_perms": other_perms,
121 "extra_perms_exist": extra_perms_exist,
122 }
123
124
125 @register.inclusion_tag("wagtailadmin/shared/buttons.html", takes_context=True)
126 def user_listing_buttons(context, user):
127 button_hooks = hooks.get_hooks("register_user_listing_buttons")
128 buttons = sorted(
129 itertools.chain.from_iterable(hook(context, user) for hook in button_hooks)
130 )
131 return {"user": user, "buttons": buttons}
132
[end of wagtail/users/templatetags/wagtailusers_tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/users/templatetags/wagtailusers_tags.py b/wagtail/users/templatetags/wagtailusers_tags.py
--- a/wagtail/users/templatetags/wagtailusers_tags.py
+++ b/wagtail/users/templatetags/wagtailusers_tags.py
@@ -2,6 +2,9 @@
from collections import defaultdict
from django import template
+from django.contrib.auth import get_permission_codename
+from django.contrib.auth.models import Permission
+from django.utils.text import camel_case_to_spaces
from wagtail import hooks
from wagtail.users.permission_order import CONTENT_TYPE_ORDER
@@ -9,6 +12,45 @@
register = template.Library()
+def normalize_permission_label(permission: Permission):
+ """
+ Strip model name from the end of the label, e.g. "Can deliver pizza" for a
+ Pizza model becomes "Can deliver". For permissions in the model's
+ Meta.default_permissions with default labels, also replace underscores
+ with spaces.
+
+ This is used to display custom model permissions in the admin.
+
+ See https://github.com/wagtail/wagtail/issues/10982.
+ """
+ label = permission.name
+ content_type = permission.content_type
+ model = content_type.model_class()
+ verbose_name = default_verbose_name = content_type.name
+
+ if model:
+ default_verbose_name = camel_case_to_spaces(model._meta.object_name)
+
+ # If it's in default_permissions and the label matches Django's default
+ # label, remove the model name from the end of the label. Also replace
+ # underscores with spaces, as Django uses the action internal name as-is
+ # for the permission label, which means it tends to be in snake_case.
+ for action in model._meta.default_permissions:
+ default_codename = get_permission_codename(action, model._meta)
+ is_default = permission.codename == default_codename
+ if is_default and permission.name.startswith(f"Can {action}"):
+ return f"Can {action.replace('_', ' ')}"
+
+ # For all other cases (including custom permissions), try to remove the
+ # verbose name from the end of the label. This only works if the label
+ # matches the current verbose name or Django's default verbose name.
+ for name in (default_verbose_name, verbose_name):
+ if label.lower().endswith(name.lower()):
+ return label[: -len(name)].strip()
+
+ return label
+
+
@register.inclusion_tag("wagtailusers/groups/includes/formatted_permissions.html")
def format_permissions(permission_bound_field):
"""
@@ -92,10 +134,7 @@
checkbox = checkboxes_by_id[perm.id]
# identify the main categories of permission, and assign to
# the relevant dict key, else bung in the 'custom_perms' list
- permission_action = perm.codename.split("_", maxsplit=1)
- permission_action = permission_action[permission_action[0].lower() == "can"]
- permission_action = permission_action.rsplit(maxsplit=1)[0]
-
+ permission_action = perm.codename.split("_")[0]
if permission_action in main_permission_names:
if permission_action in extra_perms_exist:
extra_perms_exist[permission_action] = True
@@ -105,10 +144,12 @@
}
else:
extra_perms_exist["custom"] = True
+ perm_name = normalize_permission_label(perm)
+
custom_perms.append(
{
"perm": perm,
- "name": f"Can {permission_action}",
+ "name": perm_name,
"selected": checkbox.data["selected"],
}
)
| {"golden_diff": "diff --git a/wagtail/users/templatetags/wagtailusers_tags.py b/wagtail/users/templatetags/wagtailusers_tags.py\n--- a/wagtail/users/templatetags/wagtailusers_tags.py\n+++ b/wagtail/users/templatetags/wagtailusers_tags.py\n@@ -2,6 +2,9 @@\n from collections import defaultdict\n \n from django import template\n+from django.contrib.auth import get_permission_codename\n+from django.contrib.auth.models import Permission\n+from django.utils.text import camel_case_to_spaces\n \n from wagtail import hooks\n from wagtail.users.permission_order import CONTENT_TYPE_ORDER\n@@ -9,6 +12,45 @@\n register = template.Library()\n \n \n+def normalize_permission_label(permission: Permission):\n+ \"\"\"\n+ Strip model name from the end of the label, e.g. \"Can deliver pizza\" for a\n+ Pizza model becomes \"Can deliver\". For permissions in the model's\n+ Meta.default_permissions with default labels, also replace underscores\n+ with spaces.\n+\n+ This is used to display custom model permissions in the admin.\n+\n+ See https://github.com/wagtail/wagtail/issues/10982.\n+ \"\"\"\n+ label = permission.name\n+ content_type = permission.content_type\n+ model = content_type.model_class()\n+ verbose_name = default_verbose_name = content_type.name\n+\n+ if model:\n+ default_verbose_name = camel_case_to_spaces(model._meta.object_name)\n+\n+ # If it's in default_permissions and the label matches Django's default\n+ # label, remove the model name from the end of the label. Also replace\n+ # underscores with spaces, as Django uses the action internal name as-is\n+ # for the permission label, which means it tends to be in snake_case.\n+ for action in model._meta.default_permissions:\n+ default_codename = get_permission_codename(action, model._meta)\n+ is_default = permission.codename == default_codename\n+ if is_default and permission.name.startswith(f\"Can {action}\"):\n+ return f\"Can {action.replace('_', ' ')}\"\n+\n+ # For all other cases (including custom permissions), try to remove the\n+ # verbose name from the end of the label. 
This only works if the label\n+ # matches the current verbose name or Django's default verbose name.\n+ for name in (default_verbose_name, verbose_name):\n+ if label.lower().endswith(name.lower()):\n+ return label[: -len(name)].strip()\n+\n+ return label\n+\n+\n @register.inclusion_tag(\"wagtailusers/groups/includes/formatted_permissions.html\")\n def format_permissions(permission_bound_field):\n \"\"\"\n@@ -92,10 +134,7 @@\n checkbox = checkboxes_by_id[perm.id]\n # identify the main categories of permission, and assign to\n # the relevant dict key, else bung in the 'custom_perms' list\n- permission_action = perm.codename.split(\"_\", maxsplit=1)\n- permission_action = permission_action[permission_action[0].lower() == \"can\"]\n- permission_action = permission_action.rsplit(maxsplit=1)[0]\n-\n+ permission_action = perm.codename.split(\"_\")[0]\n if permission_action in main_permission_names:\n if permission_action in extra_perms_exist:\n extra_perms_exist[permission_action] = True\n@@ -105,10 +144,12 @@\n }\n else:\n extra_perms_exist[\"custom\"] = True\n+ perm_name = normalize_permission_label(perm)\n+\n custom_perms.append(\n {\n \"perm\": perm,\n- \"name\": f\"Can {permission_action}\",\n+ \"name\": perm_name,\n \"selected\": checkbox.data[\"selected\"],\n }\n )\n", "issue": "Inconsistent use of model verbose_name in permissions UI\n### Issue Summary\r\n\r\nThe object permissions UI doesn't show a model's [`verbose_name`](https://docs.djangoproject.com/en/4.2/ref/models/options/#verbose-name) if it differs from the model's name when it was created.\r\n\r\n### Steps to Reproduce\r\n\r\nStart a new project with `wagtail start myproject` and create a simple model like the following:\r\n\r\n```py\r\nfrom django.db import models\r\n\r\nfrom wagtail.snippets.models import register_snippet\r\n\r\n@register_snippet\r\nclass MyModel(models.Model):\r\n text = models.TextField()\r\n```\r\n\r\nIn the group edit view, permissions render like this; note \"My model\" and \"Can view\":\r\n\r\n\r\n\r\nNow, give the model a different `verbose_name`:\r\n\r\n```py\r\n@register_snippet\r\nclass MyModel(models.Model):\r\n text = models.TextField()\r\n\r\n class Meta:\r\n verbose_name = \"Something else\"\r\n```\r\n\r\nNow, the permissions render in a surprising way; note \"Something else\" and \"Can view my model\":\r\n\r\n\r\n\r\nAs a user, I would expect to see \"Can view\" the way all other objects are typically listed.\r\n\r\nThis behavior seems to come from this line that defines the permission \"name\":\r\n\r\nhttps://github.com/wagtail/wagtail/blob/a8bc03dd8aad9d15fd4d8e84ace1bc89cbe96053/wagtail/users/templatetags/wagtailusers_tags.py#L101-L103\r\n\r\nThis code assumes that a model's content type's name (`perm.content_type.name`) will exactly match the name on the permission object (`perm.name`). This is true as long as the model's verbose name hasn't been changed, but, [per the docs](https://docs.djangoproject.com/en/4.2/ref/contrib/contenttypes/#django.contrib.contenttypes.models.ContentType.name), `content_type.name` \"is taken from the `verbose_name` attribute of the model\". So if a model's verbose name is changed, that change will show up when you look at its content type.\r\n\r\nThe issue is that when a model is first created, its automatically-generated permissions are given names that contain the model's name at that time. 
For example, the above `MyModel` class generated permissions like this:\r\n\r\n```\r\n110|28|add_mymodel|Can add my model\r\n111|28|change_mymodel|Can change my model\r\n112|28|delete_mymodel|Can delete my model\r\n113|28|view_mymodel|Can view my model\r\n```\r\n\r\nIn the above example, the code assumes that since the content type reports its name as `Something else`, the permission's name will be `Can view something else`, but this isn't a valid assumption. The permission's name is still what it was when it was created, `Can view my model`. \r\n\r\nTo fix this, the code could lookup the model objects corresponding to the permission content types and use those to do the substitution, maybe something like this:\r\n\r\n```diff\r\ndiff --git a/wagtail/users/templatetags/wagtailusers_tags.py b/wagtail/users/templatetags/wagtailusers_tags.py\r\nindex c188425ad0..3da81fe0dd 100644\r\n--- a/wagtail/users/templatetags/wagtailusers_tags.py\r\n+++ b/wagtail/users/templatetags/wagtailusers_tags.py\r\n@@ -95,11 +95,15 @@ def format_permissions(permission_bound_field):\r\n }\r\n else:\r\n extra_perms_exist[\"custom\"] = True\r\n+ perm_model_class = perm.content_type.model_class()\r\n custom_perms.append(\r\n {\r\n \"perm\": perm,\r\n \"name\": re.sub(\r\n- f\"{perm.content_type.name}$\", \"\", perm.name, flags=re.I\r\n+ f\"{perm_model_class._meta.model_name}$\", \"\", perm.name, flags=re.I\r\n ).strip(),\r\n \"selected\": checkbox.data[\"selected\"],\r\n }\r\n```\r\n\r\n### Technical details\r\n\r\n- I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: yes\r\n- Python version: 3.11.1\r\n- Django version: 4.0.10\r\n- Wagtail version: 5.2a0\n", "before_files": [{"content": "import itertools\nfrom collections import defaultdict\n\nfrom django import template\n\nfrom wagtail import hooks\nfrom wagtail.users.permission_order import CONTENT_TYPE_ORDER\n\nregister = template.Library()\n\n\[email protected]_tag(\"wagtailusers/groups/includes/formatted_permissions.html\")\ndef format_permissions(permission_bound_field):\n \"\"\"\n Given a bound field with a queryset of Permission objects - which must be using\n the CheckboxSelectMultiple widget - construct a list of dictionaries for 'objects':\n\n 'objects': [\n {\n 'object': name_of_some_content_object,\n 'add': checkbox,\n 'change': checkbox,\n 'delete': checkbox,\n 'publish': checkbox, # only if the model extends DraftStateMixin\n 'custom': list_of_checkboxes_for_custom_permissions\n },\n ]\n\n and a list of other permissions:\n\n 'others': [\n (any_non_add_change_delete_permission, checkbox),\n ]\n\n (where 'checkbox' is an object with a tag() method that renders the checkbox as HTML;\n this is a BoundWidget on Django >=1.11)\n\n - and returns a table template formatted with this list.\n\n \"\"\"\n permissions = permission_bound_field.field._queryset\n # get a distinct and ordered list of the content types that these permissions relate to.\n # relies on Permission model default ordering, dict.fromkeys() retaining that order\n # from the queryset, and the stability of sorted().\n content_type_ids = sorted(\n dict.fromkeys(permissions.values_list(\"content_type_id\", flat=True)),\n key=lambda ct: CONTENT_TYPE_ORDER.get(ct, float(\"inf\")),\n )\n\n # iterate over permission_bound_field to build a lookup of individual renderable\n # checkbox objects\n # checkbox.data['value'] gives a ModelChoiceIteratorValue\n checkboxes_by_id = {\n int(checkbox.data[\"value\"].value): checkbox\n for checkbox in permission_bound_field\n 
}\n\n object_perms = []\n other_perms = []\n\n # Permissions that are known by Wagtail, to be shown under their own columns.\n # Other permissions will be shown under the \"custom permissions\" column.\n main_permission_names = [\"add\", \"change\", \"delete\", \"publish\", \"lock\", \"unlock\"]\n\n # Only show the columns for these permissions if any of the model has them.\n extra_perms_exist = {\n \"publish\": False,\n \"lock\": False,\n \"unlock\": False,\n \"custom\": False,\n }\n # Batch the permission query for all content types, then group by content type\n # (instead of querying permissions for each content type separately)\n content_perms_by_ct_id = defaultdict(list)\n permissions = permissions.filter(content_type_id__in=content_type_ids)\n for permission in permissions:\n content_perms_by_ct_id[permission.content_type_id].append(permission)\n\n # Iterate using the sorted content_type_ids\n for ct_id in content_type_ids:\n content_perms = content_perms_by_ct_id[ct_id]\n content_perms_dict = {}\n custom_perms = []\n\n if content_perms[0].content_type.name == \"admin\":\n perm = content_perms[0]\n other_perms.append((perm, checkboxes_by_id[perm.id]))\n continue\n\n for perm in content_perms:\n content_perms_dict[\"object\"] = perm.content_type.name\n checkbox = checkboxes_by_id[perm.id]\n # identify the main categories of permission, and assign to\n # the relevant dict key, else bung in the 'custom_perms' list\n permission_action = perm.codename.split(\"_\", maxsplit=1)\n permission_action = permission_action[permission_action[0].lower() == \"can\"]\n permission_action = permission_action.rsplit(maxsplit=1)[0]\n\n if permission_action in main_permission_names:\n if permission_action in extra_perms_exist:\n extra_perms_exist[permission_action] = True\n content_perms_dict[permission_action] = {\n \"perm\": perm,\n \"checkbox\": checkbox,\n }\n else:\n extra_perms_exist[\"custom\"] = True\n custom_perms.append(\n {\n \"perm\": perm,\n \"name\": f\"Can {permission_action}\",\n \"selected\": checkbox.data[\"selected\"],\n }\n )\n\n content_perms_dict[\"custom\"] = custom_perms\n object_perms.append(content_perms_dict)\n return {\n \"object_perms\": object_perms,\n \"other_perms\": other_perms,\n \"extra_perms_exist\": extra_perms_exist,\n }\n\n\[email protected]_tag(\"wagtailadmin/shared/buttons.html\", takes_context=True)\ndef user_listing_buttons(context, user):\n button_hooks = hooks.get_hooks(\"register_user_listing_buttons\")\n buttons = sorted(\n itertools.chain.from_iterable(hook(context, user) for hook in button_hooks)\n )\n return {\"user\": user, \"buttons\": buttons}\n", "path": "wagtail/users/templatetags/wagtailusers_tags.py"}]} | 2,916 | 827 |
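The merged fix (`normalize_permission_label` in the diff above) keys off the model's original camel-case-derived name rather than its current verbose name. A stripped-down sketch of that string logic; the function name and sample values below are mine, not Wagtail's:

```python
from django.utils.text import camel_case_to_spaces  # requires Django

def strip_model_suffix(label: str, object_name: str) -> str:
    """Drop a trailing default model name from a permission label, if present."""
    default_verbose = camel_case_to_spaces(object_name)  # "MyModel" -> "my model"
    if label.lower().endswith(default_verbose.lower()):
        return label[: -len(default_verbose)].strip()
    return label

print(strip_model_suffix("Can view my model", "MyModel"))  # -> "Can view"
```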
gh_patches_debug_13438 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3307 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider vetco is broken
During the global build at 2021-09-01-14-42-16, spider **vetco** failed with **0 features** and **24644 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/logs/vetco.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/output/vetco.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/output/vetco.geojson))
</issue>
<code>
[start of locations/spiders/vetco_clinic.py]
1 # -*- coding: utf-8 -*-
2 import json
3 import re
4
5 import scrapy
6
7 from locations.items import GeojsonPointItem
8 from scrapy.selector import Selector
9
10
11 class VetcoSpider(scrapy.Spider):
12 name = "vetco"
13 item_attributes = {'brand': "vetcoclinics"}
14 allowed_domains = ["vetcoclinics.com"]
15 start_urls = (
16 'https://www.vetcoclinics.com/services-and-clinics/vaccination-clinics-by-state/',
17 )
18
19 def start_requests(self):
20 with open('./locations/searchable_points/us_zcta.csv') as points:
21 next(points) # Ignore the header
22 for point in points:
23 row = point.split(',')
24 zip = row[0].strip().strip('"')
25
26 url = f"https://www.vetcoclinics.com/_assets/dynamic/ajax/locator.php?zip={zip}"
27
28 yield scrapy.http.Request(
29 url,
30 self.parse,
31 method='GET'
32 )
33
34 def parse(self, response):
35 jsonresponse = json.loads(response.body_as_unicode())
36 if jsonresponse is not None:
37 clinics = jsonresponse.get('clinics')
38 if clinics:
39 for stores in clinics:
40 body = stores['label']
41 address = Selector(text=body).xpath('//div[@class="locationinfo_area"]/address/text()').extract()
42 if len(address) == 3:
43 addr_full, city_state_postal, phone = [item.split(",") for item in address]
44 city, state_postal = [item.split(",") for item in city_state_postal]
45 state, postal = re.search(r'([A-Z]{2}) (\d{5})', state_postal[0]).groups()
46
47
48 else:
49 addr_full, city_state_postal = [item.split(",") for item in address]
50 city, state_postal = [item.split(",") for item in city_state_postal]
51 state, postal = re.search(r'([A-Z]{2}) (\d{5})', state_postal[0]).groups()
52
53 properties = {
54 'ref': addr_full[0].strip(),
55 'addr_full': addr_full[0].strip(),
56 'city': city[0].strip(),
57 'state': state,
58 'postcode': postal,
59 'lat': float(stores["point"]["lat"]),
60 'lon': float(stores["point"]["long"]),
61 'website': response.url
62 }
63
64 yield GeojsonPointItem(**properties)
65
[end of locations/spiders/vetco_clinic.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/vetco_clinic.py b/locations/spiders/vetco_clinic.py
--- a/locations/spiders/vetco_clinic.py
+++ b/locations/spiders/vetco_clinic.py
@@ -38,7 +38,7 @@
if clinics:
for stores in clinics:
body = stores['label']
- address = Selector(text=body).xpath('//div[@class="locationinfo_area"]/address/text()').extract()
+ address = Selector(text=body).xpath('//address/text()').extract()
if len(address) == 3:
addr_full, city_state_postal, phone = [item.split(",") for item in address]
city, state_postal = [item.split(",") for item in city_state_postal]
| {"golden_diff": "diff --git a/locations/spiders/vetco_clinic.py b/locations/spiders/vetco_clinic.py\n--- a/locations/spiders/vetco_clinic.py\n+++ b/locations/spiders/vetco_clinic.py\n@@ -38,7 +38,7 @@\n if clinics:\n for stores in clinics:\n body = stores['label']\n- address = Selector(text=body).xpath('//div[@class=\"locationinfo_area\"]/address/text()').extract()\n+ address = Selector(text=body).xpath('//address/text()').extract()\n if len(address) == 3:\n addr_full, city_state_postal, phone = [item.split(\",\") for item in address]\n city, state_postal = [item.split(\",\") for item in city_state_postal]\n", "issue": "Spider vetco is broken\nDuring the global build at 2021-09-01-14-42-16, spider **vetco** failed with **0 features** and **24644 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/logs/vetco.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/output/vetco.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/output/vetco.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom scrapy.selector import Selector\n\n\nclass VetcoSpider(scrapy.Spider):\n name = \"vetco\"\n item_attributes = {'brand': \"vetcoclinics\"}\n allowed_domains = [\"vetcoclinics.com\"]\n start_urls = (\n 'https://www.vetcoclinics.com/services-and-clinics/vaccination-clinics-by-state/',\n )\n\n def start_requests(self):\n with open('./locations/searchable_points/us_zcta.csv') as points:\n next(points) # Ignore the header\n for point in points:\n row = point.split(',')\n zip = row[0].strip().strip('\"')\n\n url = f\"https://www.vetcoclinics.com/_assets/dynamic/ajax/locator.php?zip={zip}\"\n\n yield scrapy.http.Request(\n url,\n self.parse,\n method='GET'\n )\n\n def parse(self, response):\n jsonresponse = json.loads(response.body_as_unicode())\n if jsonresponse is not None:\n clinics = jsonresponse.get('clinics')\n if clinics:\n for stores in clinics:\n body = stores['label']\n address = Selector(text=body).xpath('//div[@class=\"locationinfo_area\"]/address/text()').extract()\n if len(address) == 3:\n addr_full, city_state_postal, phone = [item.split(\",\") for item in address]\n city, state_postal = [item.split(\",\") for item in city_state_postal]\n state, postal = re.search(r'([A-Z]{2}) (\\d{5})', state_postal[0]).groups()\n\n\n else:\n addr_full, city_state_postal = [item.split(\",\") for item in address]\n city, state_postal = [item.split(\",\") for item in city_state_postal]\n state, postal = re.search(r'([A-Z]{2}) (\\d{5})', state_postal[0]).groups()\n\n properties = {\n 'ref': addr_full[0].strip(),\n 'addr_full': addr_full[0].strip(),\n 'city': city[0].strip(),\n 'state': state,\n 'postcode': postal,\n 'lat': float(stores[\"point\"][\"lat\"]),\n 'lon': float(stores[\"point\"][\"long\"]),\n 'website': response.url\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/vetco_clinic.py"}]} | 1,388 | 173 |
gh_patches_debug_8249 | rasdani/github-patches | git_diff | holoviz__panel-2611 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change fast theme button color back to white
After upgrading to Panel 0.12.0, the fast theme button switched to a red color

I would propose switching it back to white because 1) it looks better, 2) it receives less attention, and 3) it makes changing the style of the template easier.
With the white color it looks like this:

</issue>
<code>
[start of panel/template/fast/theme.py]
1 """
2 Functionality for styling according to Fast.design
3 """
4 import pathlib
5 import param
6
7 from bokeh.themes import Theme as _BkTheme
8
9 from ..theme import DarkTheme, DefaultTheme
10
11 _ROOT = pathlib.Path(__file__).parent / "css"
12
13 COLLAPSED_SVG_ICON = """
14 <svg style="stroke: var(--accent-fill-rest);" width="18" height="18" viewBox="0 0 18 18" fill="none" xmlns="http://www.w3.org/2000/svg" slot="collapsed-icon">
15 <path d="M15.2222 1H2.77778C1.79594 1 1 1.79594 1 2.77778V15.2222C1 16.2041 1.79594 17 2.77778 17H15.2222C16.2041 17 17 16.2041 17 15.2222V2.77778C17 1.79594 16.2041 1 15.2222 1Z" stroke-linecap="round" stroke-linejoin="round"></path>
16 <path d="M9 5.44446V12.5556" stroke-linecap="round" stroke-linejoin="round"></path>
17 <path d="M5.44446 9H12.5556" stroke-linecap="round" stroke-linejoin="round"></path>
18 </svg>
19 """ # noqa
20
21 EXPANDED_SVG_ICON = """
22 <svg style="stroke: var(--accent-fill-rest);" width="18" height="18" viewBox="0 0 18 18" fill="none" xmlns="http://www.w3.org/2000/svg" slot="expanded-icon">
23 <path d="M15.2222 1H2.77778C1.79594 1 1 1.79594 1 2.77778V15.2222C1 16.2041 1.79594 17 2.77778 17H15.2222C16.2041 17 17 16.2041 17 15.2222V2.77778C17 1.79594 16.2041 1 15.2222 1Z" stroke-linecap="round" stroke-linejoin="round"></path>
24 <path d="M5.44446 9H12.5556" stroke-linecap="round" stroke-linejoin="round"></path>
25 </svg>
26 """ # noqa
27
28 FONT_URL = "//fonts.googleapis.com/css?family=Open+Sans"
29
30 class FastStyle(param.Parameterized):
31 """
32 The FastStyle class provides the different colors and icons used
33 to style the Fast Templates.
34 """
35
36 background_color = param.String(default="#ffffff")
37 neutral_color = param.String(default="#000000")
38 accent_base_color = param.String(default="#A01346")
39 collapsed_icon = param.String(default=COLLAPSED_SVG_ICON)
40 expanded_icon = param.String(default=EXPANDED_SVG_ICON)
41 color = param.String(default="#00aa41")
42 neutral_fill_card_rest = param.String(default="#F7F7F7")
43 neutral_focus = param.String(default="#888888")
44 neutral_foreground_rest = param.String(default="#2B2B2B")
45
46 header_background = param.String(default="#00aa41")
47 header_neutral_color = param.String(default="#ffffff")
48 header_accent_base_color = param.String(default="#A01346")
49 header_color = param.String(default="#ffffff")
50 font = param.String(default="Open Sans, sans-serif")
51 font_url = param.String(default=FONT_URL)
52 corner_radius = param.Integer(default=3)
53 shadow = param.Boolean(default=True)
54
55 def create_bokeh_theme(self):
56 """Returns a custom bokeh theme based on the style parameters
57
58 Returns:
59 Dict: A Bokeh Theme
60 """
61
62 return {
63 "attrs": {
64 "Figure": {
65 "background_fill_color": self.background_color,
66 "border_fill_color": self.neutral_fill_card_rest,
67 "border_fill_alpha": 0,
68 "outline_line_color": self.neutral_focus,
69 "outline_line_alpha": 0.5,
70 "outline_line_width": 1,
71 },
72 "Grid": {"grid_line_color": self.neutral_focus, "grid_line_alpha": 0.25},
73 "Axis": {
74 "major_tick_line_alpha": 0.5,
75 "major_tick_line_color": self.neutral_foreground_rest,
76 "minor_tick_line_alpha": 0.25,
77 "minor_tick_line_color": self.neutral_foreground_rest,
78 "axis_line_alpha": 0.1,
79 "axis_line_color": self.neutral_foreground_rest,
80 "major_label_text_color": self.neutral_foreground_rest,
81 "major_label_text_font": self.font,
82 # Should be added back when bokeh 2.3.3 is released and https://github.com/bokeh/bokeh/issues/11110 fixed
83 # "major_label_text_font_size": "1.025em",
84 "axis_label_standoff": 10,
85 "axis_label_text_color": self.neutral_foreground_rest,
86 "axis_label_text_font": self.font,
87 "axis_label_text_font_size": "1.25em",
88 "axis_label_text_font_style": "normal",
89 },
90 "Legend": {
91 "spacing": 8,
92 "glyph_width": 15,
93 "label_standoff": 8,
94 "label_text_color": self.neutral_foreground_rest,
95 "label_text_font": self.font,
96 "label_text_font_size": "1.025em",
97 "border_line_alpha": 0.5,
98 "border_line_color": self.neutral_focus,
99 "background_fill_alpha": 0.25,
100 "background_fill_color": self.neutral_fill_card_rest,
101 },
102 "ColorBar": {
103 "title_text_color": self.neutral_foreground_rest,
104 "title_text_font": self.font,
105 "title_text_font_size": "1.025em",
106 "title_text_font_style": "normal",
107 "major_label_text_color": self.neutral_foreground_rest,
108 "major_label_text_font": self.font,
109 "major_label_text_font_size": "1.025em",
110 # "background_fill_color": FAST_DARK_75,
111 "major_tick_line_alpha": 0,
112 "bar_line_alpha": 0,
113 },
114 "Title": {
115 "text_color": self.neutral_foreground_rest,
116 "text_font": self.font,
117 "text_font_size": "1.15em",
118 },
119 }
120 }
121
122
123 DEFAULT_STYLE = FastStyle()
124 DARK_STYLE = FastStyle(
125 accent_base_color="#E1477E",
126 background_color="#181818",
127 color="#ffffff",
128 header_background="#313131",
129 header_color="#ffffff",
130 neutral_fill_card_rest="#212121",
131 neutral_focus="#717171",
132 neutral_foreground_rest="#e5e5e5",
133 shadow = False,
134 )
135
136 class FastDefaultTheme(DefaultTheme):
137
138 base_css = param.Filename(default=_ROOT / 'fast_root_default.css')
139
140 style = param.ClassSelector(default=DEFAULT_STYLE, class_=FastStyle)
141
142 __abstract = True
143
144 @property
145 def bokeh_theme(self):
146 return _BkTheme(json=self.style.create_bokeh_theme())
147
148
149 class FastDarkTheme(DarkTheme):
150
151 base_css = param.Filename(default=_ROOT / 'fast_root_dark.css')
152
153 style = param.ClassSelector(default=DARK_STYLE, class_=FastStyle)
154
155 __abstract = True
156
157 @property
158 def bokeh_theme(self):
159 return _BkTheme(json=self.style.create_bokeh_theme())
160
[end of panel/template/fast/theme.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/panel/template/fast/theme.py b/panel/template/fast/theme.py
--- a/panel/template/fast/theme.py
+++ b/panel/template/fast/theme.py
@@ -45,7 +45,7 @@
header_background = param.String(default="#00aa41")
header_neutral_color = param.String(default="#ffffff")
- header_accent_base_color = param.String(default="#A01346")
+ header_accent_base_color = param.String(default="#ffffff")
header_color = param.String(default="#ffffff")
font = param.String(default="Open Sans, sans-serif")
font_url = param.String(default=FONT_URL)
| {"golden_diff": "diff --git a/panel/template/fast/theme.py b/panel/template/fast/theme.py\n--- a/panel/template/fast/theme.py\n+++ b/panel/template/fast/theme.py\n@@ -45,7 +45,7 @@\n \n header_background = param.String(default=\"#00aa41\")\n header_neutral_color = param.String(default=\"#ffffff\")\n- header_accent_base_color = param.String(default=\"#A01346\")\n+ header_accent_base_color = param.String(default=\"#ffffff\")\n header_color = param.String(default=\"#ffffff\")\n font = param.String(default=\"Open Sans, sans-serif\")\n font_url = param.String(default=FONT_URL)\n", "issue": "Change fast theme button color back to white\nAfter upgrading to Panel 0.12.0 the fast theme button switched to the red color\r\n\r\n\r\n\r\nI would propose switching it back to white because 1) It looks better 2) receives less attention 3) makes changing the style of the template easier.\r\n\r\nWith the white color it looks like\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nFunctionality for styling according to Fast.design\n\"\"\"\nimport pathlib\nimport param\n\nfrom bokeh.themes import Theme as _BkTheme\n\nfrom ..theme import DarkTheme, DefaultTheme\n\n_ROOT = pathlib.Path(__file__).parent / \"css\"\n\nCOLLAPSED_SVG_ICON = \"\"\"\n<svg style=\"stroke: var(--accent-fill-rest);\" width=\"18\" height=\"18\" viewBox=\"0 0 18 18\" fill=\"none\" xmlns=\"http://www.w3.org/2000/svg\" slot=\"collapsed-icon\">\n <path d=\"M15.2222 1H2.77778C1.79594 1 1 1.79594 1 2.77778V15.2222C1 16.2041 1.79594 17 2.77778 17H15.2222C16.2041 17 17 16.2041 17 15.2222V2.77778C17 1.79594 16.2041 1 15.2222 1Z\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n <path d=\"M9 5.44446V12.5556\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n <path d=\"M5.44446 9H12.5556\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n</svg>\n\"\"\" # noqa\n\nEXPANDED_SVG_ICON = \"\"\"\n<svg style=\"stroke: var(--accent-fill-rest);\" width=\"18\" height=\"18\" viewBox=\"0 0 18 18\" fill=\"none\" xmlns=\"http://www.w3.org/2000/svg\" slot=\"expanded-icon\">\n <path d=\"M15.2222 1H2.77778C1.79594 1 1 1.79594 1 2.77778V15.2222C1 16.2041 1.79594 17 2.77778 17H15.2222C16.2041 17 17 16.2041 17 15.2222V2.77778C17 1.79594 16.2041 1 15.2222 1Z\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n <path d=\"M5.44446 9H12.5556\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n</svg>\n\"\"\" # noqa\n\nFONT_URL = \"//fonts.googleapis.com/css?family=Open+Sans\"\n\nclass FastStyle(param.Parameterized):\n \"\"\"\n The FastStyle class provides the different colors and icons used\n to style the Fast Templates.\n \"\"\"\n\n background_color = param.String(default=\"#ffffff\")\n neutral_color = param.String(default=\"#000000\")\n accent_base_color = param.String(default=\"#A01346\")\n collapsed_icon = param.String(default=COLLAPSED_SVG_ICON)\n expanded_icon = param.String(default=EXPANDED_SVG_ICON)\n color = param.String(default=\"#00aa41\")\n neutral_fill_card_rest = param.String(default=\"#F7F7F7\")\n neutral_focus = param.String(default=\"#888888\")\n neutral_foreground_rest = param.String(default=\"#2B2B2B\")\n\n header_background = param.String(default=\"#00aa41\")\n header_neutral_color = param.String(default=\"#ffffff\")\n header_accent_base_color = param.String(default=\"#A01346\")\n header_color = param.String(default=\"#ffffff\")\n font = param.String(default=\"Open Sans, sans-serif\")\n font_url = param.String(default=FONT_URL)\n corner_radius = param.Integer(default=3)\n shadow = 
param.Boolean(default=True)\n\n def create_bokeh_theme(self):\n \"\"\"Returns a custom bokeh theme based on the style parameters\n\n Returns:\n Dict: A Bokeh Theme\n \"\"\"\n\n return {\n \"attrs\": {\n \"Figure\": {\n \"background_fill_color\": self.background_color,\n \"border_fill_color\": self.neutral_fill_card_rest,\n \"border_fill_alpha\": 0,\n \"outline_line_color\": self.neutral_focus,\n \"outline_line_alpha\": 0.5,\n \"outline_line_width\": 1,\n },\n \"Grid\": {\"grid_line_color\": self.neutral_focus, \"grid_line_alpha\": 0.25},\n \"Axis\": {\n \"major_tick_line_alpha\": 0.5,\n \"major_tick_line_color\": self.neutral_foreground_rest,\n \"minor_tick_line_alpha\": 0.25,\n \"minor_tick_line_color\": self.neutral_foreground_rest,\n \"axis_line_alpha\": 0.1,\n \"axis_line_color\": self.neutral_foreground_rest,\n \"major_label_text_color\": self.neutral_foreground_rest,\n \"major_label_text_font\": self.font,\n # Should be added back when bokeh 2.3.3 is released and https://github.com/bokeh/bokeh/issues/11110 fixed\n # \"major_label_text_font_size\": \"1.025em\",\n \"axis_label_standoff\": 10,\n \"axis_label_text_color\": self.neutral_foreground_rest,\n \"axis_label_text_font\": self.font,\n \"axis_label_text_font_size\": \"1.25em\",\n \"axis_label_text_font_style\": \"normal\",\n },\n \"Legend\": {\n \"spacing\": 8,\n \"glyph_width\": 15,\n \"label_standoff\": 8,\n \"label_text_color\": self.neutral_foreground_rest,\n \"label_text_font\": self.font,\n \"label_text_font_size\": \"1.025em\",\n \"border_line_alpha\": 0.5,\n \"border_line_color\": self.neutral_focus,\n \"background_fill_alpha\": 0.25,\n \"background_fill_color\": self.neutral_fill_card_rest,\n },\n \"ColorBar\": {\n \"title_text_color\": self.neutral_foreground_rest,\n \"title_text_font\": self.font,\n \"title_text_font_size\": \"1.025em\",\n \"title_text_font_style\": \"normal\",\n \"major_label_text_color\": self.neutral_foreground_rest,\n \"major_label_text_font\": self.font,\n \"major_label_text_font_size\": \"1.025em\",\n # \"background_fill_color\": FAST_DARK_75,\n \"major_tick_line_alpha\": 0,\n \"bar_line_alpha\": 0,\n },\n \"Title\": {\n \"text_color\": self.neutral_foreground_rest,\n \"text_font\": self.font,\n \"text_font_size\": \"1.15em\",\n },\n }\n }\n\n\nDEFAULT_STYLE = FastStyle()\nDARK_STYLE = FastStyle(\n accent_base_color=\"#E1477E\",\n background_color=\"#181818\",\n color=\"#ffffff\",\n header_background=\"#313131\",\n header_color=\"#ffffff\",\n neutral_fill_card_rest=\"#212121\",\n neutral_focus=\"#717171\",\n neutral_foreground_rest=\"#e5e5e5\",\n shadow = False,\n)\n\nclass FastDefaultTheme(DefaultTheme):\n\n base_css = param.Filename(default=_ROOT / 'fast_root_default.css')\n\n style = param.ClassSelector(default=DEFAULT_STYLE, class_=FastStyle)\n\n __abstract = True\n\n @property\n def bokeh_theme(self):\n return _BkTheme(json=self.style.create_bokeh_theme())\n\n\nclass FastDarkTheme(DarkTheme):\n\n base_css = param.Filename(default=_ROOT / 'fast_root_dark.css')\n\n style = param.ClassSelector(default=DARK_STYLE, class_=FastStyle)\n\n __abstract = True\n\n @property\n def bokeh_theme(self):\n return _BkTheme(json=self.style.create_bokeh_theme())\n", "path": "panel/template/fast/theme.py"}]} | 2,980 | 142 |
gh_patches_debug_40056 | rasdani/github-patches | git_diff | arviz-devs__arviz-1074 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bokeh hover tooltip
We need to implement better default hover settings.
https://docs.bokeh.org/en/1.4.0/docs/reference/models/tools.html#bokeh.models.tools.HoverTool
</issue>
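As background for the Bokeh API linked above, here is a minimal, self-contained sketch of how hover tooltips are typically attached to a figure; the data source and the column names (`mu`, `tau`) are placeholders rather than anything taken from the arviz code below.

```python
# Minimal Bokeh HoverTool illustration (not the arviz fix itself).
# "@name" looks the field up in the plot's data source; the "@{...}" form
# is required when a column name contains spaces or special characters.
from bokeh.models import ColumnDataSource, HoverTool
from bokeh.plotting import figure

source = ColumnDataSource(data={"mu": [0.1, 0.2, 0.3], "tau": [1.0, 1.5, 2.0]})
p = figure()
p.circle("mu", "tau", source=source)
p.add_tools(HoverTool(tooltips=[("mu", "@mu"), ("tau", "@{tau}")]))
```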
<code>
[start of arviz/plots/backends/bokeh/pairplot.py]
1 """Bokeh pairplot."""
2 import warnings
3 from uuid import uuid4
4
5 import bokeh.plotting as bkp
6 from bokeh.models import ColumnDataSource, CDSView, GroupFilter
7 import numpy as np
8
9 from . import backend_kwarg_defaults
10 from .. import show_layout
11 from ...kdeplot import plot_kde
12 from ...plot_utils import _scale_fig_size
13 from ....rcparams import rcParams
14
15
16 def plot_pair(
17 ax,
18 infdata_group,
19 numvars,
20 figsize,
21 textsize,
22 kind,
23 plot_kwargs,
24 contour,
25 fill_last,
26 divergences,
27 diverging_mask,
28 flat_var_names,
29 backend_kwargs,
30 show,
31 ):
32 """Bokeh pair plot."""
33 if backend_kwargs is None:
34 backend_kwargs = {}
35
36 backend_kwargs = {
37 **backend_kwarg_defaults(("dpi", "plot.bokeh.figure.dpi"),),
38 **backend_kwargs,
39 }
40 dpi = backend_kwargs.pop("dpi")
41 if numvars == 2:
42 (figsize, _, _, _, _, _) = _scale_fig_size(figsize, textsize, numvars - 1, numvars - 1)
43
44 source_dict = dict(zip(flat_var_names, [list(post) for post in infdata_group]))
45
46 if divergences:
47 divergenve_name = "divergences_{}".format(str(uuid4()))
48 source_dict[divergenve_name] = (
49 np.array(diverging_mask).astype(bool).astype(int).astype(str)
50 )
51
52 source = ColumnDataSource(data=source_dict)
53
54 if divergences:
55 source_nondiv = CDSView(
56 source=source, filters=[GroupFilter(column_name=divergenve_name, group="0")]
57 )
58 source_div = CDSView(
59 source=source, filters=[GroupFilter(column_name=divergenve_name, group="1")]
60 )
61
62 if ax is None:
63 backend_kwargs["width"] = int(figsize[0] * dpi)
64 backend_kwargs["height"] = int(figsize[1] * dpi)
65 ax = bkp.figure(**backend_kwargs)
66
67 if kind == "scatter":
68 if divergences:
69 ax.circle(
70 flat_var_names[0],
71 flat_var_names[1],
72 source=source,
73 view=source_nondiv,
74 legend_label="non-divergent",
75 )
76 else:
77 ax.circle(flat_var_names[0], flat_var_names[1], source=source)
78 elif kind == "kde":
79 plot_kde(
80 infdata_group[0],
81 infdata_group[1],
82 contour=contour,
83 fill_last=fill_last,
84 ax=ax,
85 backend="bokeh",
86 backend_kwargs={},
87 show=False,
88 )
89 else:
90 ax.hexbin(infdata_group[0], infdata_group[1], size=0.5)
91 ax.grid.visible = False
92
93 if divergences:
94 ax.circle(
95 flat_var_names[0],
96 flat_var_names[1],
97 line_color="black",
98 fill_color="orange",
99 line_width=1,
100 size=6,
101 source=source,
102 view=source_div,
103 legend_label="divergent",
104 )
105 ax.legend.click_policy = "hide"
106
107 ax.xaxis.axis_label = flat_var_names[0]
108 ax.yaxis.axis_label = flat_var_names[1]
109
110 show_layout(ax, show)
111
112 else:
113 max_plots = (
114 numvars ** 2 if rcParams["plot.max_subplots"] is None else rcParams["plot.max_subplots"]
115 )
116 vars_to_plot = np.sum(np.arange(numvars).cumsum() < max_plots)
117 if vars_to_plot < numvars:
118 warnings.warn(
119 "rcParams['plot.max_subplots'] ({max_plots}) is smaller than the number "
120 "of resulting pair plots with these variables, generating only a "
121 "{side}x{side} grid".format(max_plots=max_plots, side=vars_to_plot),
122 UserWarning,
123 )
124 numvars = vars_to_plot
125
126 (figsize, _, _, _, _, _) = _scale_fig_size(figsize, textsize, numvars - 2, numvars - 2)
127
128 if ax is None:
129 ax = []
130 backend_kwargs.setdefault("width", int(figsize[0] / (numvars - 1) * dpi))
131 backend_kwargs.setdefault("height", int(figsize[1] / (numvars - 1) * dpi))
132 for row in range(numvars - 1):
133 row_ax = []
134 for col in range(numvars - 1):
135 if row < col:
136 row_ax.append(None)
137 else:
138 ax_ = bkp.figure(**backend_kwargs)
139 row_ax.append(ax_)
140 ax.append(row_ax)
141 ax = np.array(ax)
142
143 tmp_flat_var_names = None
144 if len(flat_var_names) == len(list(set(flat_var_names))):
145 source_dict = dict(zip(flat_var_names, [list(post) for post in infdata_group]))
146 else:
147 tmp_flat_var_names = ["{}__{}".format(name, str(uuid4())) for name in flat_var_names]
148 source_dict = dict(zip(tmp_flat_var_names, [list(post) for post in infdata_group]))
149 if divergences:
150 divergenve_name = "divergences_{}".format(str(uuid4()))
151 source_dict[divergenve_name] = (
152 np.array(diverging_mask).astype(bool).astype(int).astype(str)
153 )
154
155 source = ColumnDataSource(data=source_dict)
156
157 if divergences:
158 source_nondiv = CDSView(
159 source=source, filters=[GroupFilter(column_name=divergenve_name, group="0")]
160 )
161 source_div = CDSView(
162 source=source, filters=[GroupFilter(column_name=divergenve_name, group="1")]
163 )
164
165 for i in range(0, numvars - 1):
166 var1 = flat_var_names[i] if tmp_flat_var_names is None else tmp_flat_var_names[i]
167
168 for j in range(0, numvars - 1):
169 if j < i:
170 continue
171
172 var2 = (
173 flat_var_names[j + 1]
174 if tmp_flat_var_names is None
175 else tmp_flat_var_names[j + 1]
176 )
177
178 if kind == "scatter":
179 if divergences:
180 ax[j, i].circle(var1, var2, source=source, view=source_nondiv)
181 else:
182 ax[j, i].circle(var1, var2, source=source)
183
184 elif kind == "kde":
185 var1_kde = infdata_group[i]
186 var2_kde = infdata_group[j + 1]
187 plot_kde(
188 var1_kde,
189 var2_kde,
190 contour=contour,
191 fill_last=fill_last,
192 ax=ax[j, i],
193 backend="bokeh",
194 backend_kwargs={},
195 show=False,
196 **plot_kwargs
197 )
198
199 else:
200 var1_hexbin = infdata_group[i]
201 var2_hexbin = infdata_group[j + 1]
202 ax[j, i].grid.visible = False
203 ax[j, i].hexbin(var1_hexbin, var2_hexbin, size=0.5)
204
205 if divergences:
206 ax[j, i].circle(
207 var1,
208 var2,
209 line_color="black",
210 fill_color="orange",
211 line_width=1,
212 size=10,
213 source=source,
214 view=source_div,
215 )
216
217 ax[j, i].xaxis.axis_label = flat_var_names[i]
218 ax[j, i].yaxis.axis_label = flat_var_names[j + 1]
219
220 show_layout(ax, show)
221
222 return ax
223
[end of arviz/plots/backends/bokeh/pairplot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/arviz/plots/backends/bokeh/pairplot.py b/arviz/plots/backends/bokeh/pairplot.py
--- a/arviz/plots/backends/bokeh/pairplot.py
+++ b/arviz/plots/backends/bokeh/pairplot.py
@@ -39,6 +39,13 @@
}
dpi = backend_kwargs.pop("dpi")
if numvars == 2:
+ if kind == "scatter":
+ tooltips = [
+ (flat_var_names[1], "@{{{}}}".format(flat_var_names[1])),
+ (flat_var_names[0], "@{{{}}}".format(flat_var_names[0])),
+ ]
+ backend_kwargs.setdefault("tooltips", tooltips)
+
(figsize, _, _, _, _, _) = _scale_fig_size(figsize, textsize, numvars - 1, numvars - 1)
source_dict = dict(zip(flat_var_names, [list(post) for post in infdata_group]))
@@ -125,21 +132,6 @@
(figsize, _, _, _, _, _) = _scale_fig_size(figsize, textsize, numvars - 2, numvars - 2)
- if ax is None:
- ax = []
- backend_kwargs.setdefault("width", int(figsize[0] / (numvars - 1) * dpi))
- backend_kwargs.setdefault("height", int(figsize[1] / (numvars - 1) * dpi))
- for row in range(numvars - 1):
- row_ax = []
- for col in range(numvars - 1):
- if row < col:
- row_ax.append(None)
- else:
- ax_ = bkp.figure(**backend_kwargs)
- row_ax.append(ax_)
- ax.append(row_ax)
- ax = np.array(ax)
-
tmp_flat_var_names = None
if len(flat_var_names) == len(list(set(flat_var_names))):
source_dict = dict(zip(flat_var_names, [list(post) for post in infdata_group]))
@@ -162,6 +154,40 @@
source=source, filters=[GroupFilter(column_name=divergenve_name, group="1")]
)
+ if ax is None:
+ ax = []
+ backend_kwargs.setdefault("width", int(figsize[0] / (numvars - 1) * dpi))
+ backend_kwargs.setdefault("height", int(figsize[1] / (numvars - 1) * dpi))
+ for row in range(numvars - 1):
+ row_ax = []
+ var2 = (
+ flat_var_names[row + 1]
+ if tmp_flat_var_names is None
+ else tmp_flat_var_names[row + 1]
+ )
+ for col in range(numvars - 1):
+ if row < col:
+ row_ax.append(None)
+ continue
+
+ var1 = (
+ flat_var_names[col]
+ if tmp_flat_var_names is None
+ else tmp_flat_var_names[col]
+ )
+ backend_kwargs_copy = backend_kwargs.copy()
+ if kind == "scatter":
+ tooltips = [
+ (var2, "@{{{}}}".format(var2)),
+ (var1, "@{{{}}}".format(var1)),
+ ]
+ backend_kwargs_copy.setdefault("tooltips", tooltips)
+
+ ax_ = bkp.figure(**backend_kwargs_copy)
+ row_ax.append(ax_)
+ ax.append(row_ax)
+ ax = np.array(ax)
+
for i in range(0, numvars - 1):
var1 = flat_var_names[i] if tmp_flat_var_names is None else tmp_flat_var_names[i]
| {"golden_diff": "diff --git a/arviz/plots/backends/bokeh/pairplot.py b/arviz/plots/backends/bokeh/pairplot.py\n--- a/arviz/plots/backends/bokeh/pairplot.py\n+++ b/arviz/plots/backends/bokeh/pairplot.py\n@@ -39,6 +39,13 @@\n }\n dpi = backend_kwargs.pop(\"dpi\")\n if numvars == 2:\n+ if kind == \"scatter\":\n+ tooltips = [\n+ (flat_var_names[1], \"@{{{}}}\".format(flat_var_names[1])),\n+ (flat_var_names[0], \"@{{{}}}\".format(flat_var_names[0])),\n+ ]\n+ backend_kwargs.setdefault(\"tooltips\", tooltips)\n+\n (figsize, _, _, _, _, _) = _scale_fig_size(figsize, textsize, numvars - 1, numvars - 1)\n \n source_dict = dict(zip(flat_var_names, [list(post) for post in infdata_group]))\n@@ -125,21 +132,6 @@\n \n (figsize, _, _, _, _, _) = _scale_fig_size(figsize, textsize, numvars - 2, numvars - 2)\n \n- if ax is None:\n- ax = []\n- backend_kwargs.setdefault(\"width\", int(figsize[0] / (numvars - 1) * dpi))\n- backend_kwargs.setdefault(\"height\", int(figsize[1] / (numvars - 1) * dpi))\n- for row in range(numvars - 1):\n- row_ax = []\n- for col in range(numvars - 1):\n- if row < col:\n- row_ax.append(None)\n- else:\n- ax_ = bkp.figure(**backend_kwargs)\n- row_ax.append(ax_)\n- ax.append(row_ax)\n- ax = np.array(ax)\n-\n tmp_flat_var_names = None\n if len(flat_var_names) == len(list(set(flat_var_names))):\n source_dict = dict(zip(flat_var_names, [list(post) for post in infdata_group]))\n@@ -162,6 +154,40 @@\n source=source, filters=[GroupFilter(column_name=divergenve_name, group=\"1\")]\n )\n \n+ if ax is None:\n+ ax = []\n+ backend_kwargs.setdefault(\"width\", int(figsize[0] / (numvars - 1) * dpi))\n+ backend_kwargs.setdefault(\"height\", int(figsize[1] / (numvars - 1) * dpi))\n+ for row in range(numvars - 1):\n+ row_ax = []\n+ var2 = (\n+ flat_var_names[row + 1]\n+ if tmp_flat_var_names is None\n+ else tmp_flat_var_names[row + 1]\n+ )\n+ for col in range(numvars - 1):\n+ if row < col:\n+ row_ax.append(None)\n+ continue\n+\n+ var1 = (\n+ flat_var_names[col]\n+ if tmp_flat_var_names is None\n+ else tmp_flat_var_names[col]\n+ )\n+ backend_kwargs_copy = backend_kwargs.copy()\n+ if kind == \"scatter\":\n+ tooltips = [\n+ (var2, \"@{{{}}}\".format(var2)),\n+ (var1, \"@{{{}}}\".format(var1)),\n+ ]\n+ backend_kwargs_copy.setdefault(\"tooltips\", tooltips)\n+\n+ ax_ = bkp.figure(**backend_kwargs_copy)\n+ row_ax.append(ax_)\n+ ax.append(row_ax)\n+ ax = np.array(ax)\n+\n for i in range(0, numvars - 1):\n var1 = flat_var_names[i] if tmp_flat_var_names is None else tmp_flat_var_names[i]\n", "issue": "Bokeh hover tooltip\nWe need to implement better default hover settings.\r\n\r\nhttps://docs.bokeh.org/en/1.4.0/docs/reference/models/tools.html#bokeh.models.tools.HoverTool\n", "before_files": [{"content": "\"\"\"Bokeh pairplot.\"\"\"\nimport warnings\nfrom uuid import uuid4\n\nimport bokeh.plotting as bkp\nfrom bokeh.models import ColumnDataSource, CDSView, GroupFilter\nimport numpy as np\n\nfrom . import backend_kwarg_defaults\nfrom .. 
import show_layout\nfrom ...kdeplot import plot_kde\nfrom ...plot_utils import _scale_fig_size\nfrom ....rcparams import rcParams\n\n\ndef plot_pair(\n ax,\n infdata_group,\n numvars,\n figsize,\n textsize,\n kind,\n plot_kwargs,\n contour,\n fill_last,\n divergences,\n diverging_mask,\n flat_var_names,\n backend_kwargs,\n show,\n):\n \"\"\"Bokeh pair plot.\"\"\"\n if backend_kwargs is None:\n backend_kwargs = {}\n\n backend_kwargs = {\n **backend_kwarg_defaults((\"dpi\", \"plot.bokeh.figure.dpi\"),),\n **backend_kwargs,\n }\n dpi = backend_kwargs.pop(\"dpi\")\n if numvars == 2:\n (figsize, _, _, _, _, _) = _scale_fig_size(figsize, textsize, numvars - 1, numvars - 1)\n\n source_dict = dict(zip(flat_var_names, [list(post) for post in infdata_group]))\n\n if divergences:\n divergenve_name = \"divergences_{}\".format(str(uuid4()))\n source_dict[divergenve_name] = (\n np.array(diverging_mask).astype(bool).astype(int).astype(str)\n )\n\n source = ColumnDataSource(data=source_dict)\n\n if divergences:\n source_nondiv = CDSView(\n source=source, filters=[GroupFilter(column_name=divergenve_name, group=\"0\")]\n )\n source_div = CDSView(\n source=source, filters=[GroupFilter(column_name=divergenve_name, group=\"1\")]\n )\n\n if ax is None:\n backend_kwargs[\"width\"] = int(figsize[0] * dpi)\n backend_kwargs[\"height\"] = int(figsize[1] * dpi)\n ax = bkp.figure(**backend_kwargs)\n\n if kind == \"scatter\":\n if divergences:\n ax.circle(\n flat_var_names[0],\n flat_var_names[1],\n source=source,\n view=source_nondiv,\n legend_label=\"non-divergent\",\n )\n else:\n ax.circle(flat_var_names[0], flat_var_names[1], source=source)\n elif kind == \"kde\":\n plot_kde(\n infdata_group[0],\n infdata_group[1],\n contour=contour,\n fill_last=fill_last,\n ax=ax,\n backend=\"bokeh\",\n backend_kwargs={},\n show=False,\n )\n else:\n ax.hexbin(infdata_group[0], infdata_group[1], size=0.5)\n ax.grid.visible = False\n\n if divergences:\n ax.circle(\n flat_var_names[0],\n flat_var_names[1],\n line_color=\"black\",\n fill_color=\"orange\",\n line_width=1,\n size=6,\n source=source,\n view=source_div,\n legend_label=\"divergent\",\n )\n ax.legend.click_policy = \"hide\"\n\n ax.xaxis.axis_label = flat_var_names[0]\n ax.yaxis.axis_label = flat_var_names[1]\n\n show_layout(ax, show)\n\n else:\n max_plots = (\n numvars ** 2 if rcParams[\"plot.max_subplots\"] is None else rcParams[\"plot.max_subplots\"]\n )\n vars_to_plot = np.sum(np.arange(numvars).cumsum() < max_plots)\n if vars_to_plot < numvars:\n warnings.warn(\n \"rcParams['plot.max_subplots'] ({max_plots}) is smaller than the number \"\n \"of resulting pair plots with these variables, generating only a \"\n \"{side}x{side} grid\".format(max_plots=max_plots, side=vars_to_plot),\n UserWarning,\n )\n numvars = vars_to_plot\n\n (figsize, _, _, _, _, _) = _scale_fig_size(figsize, textsize, numvars - 2, numvars - 2)\n\n if ax is None:\n ax = []\n backend_kwargs.setdefault(\"width\", int(figsize[0] / (numvars - 1) * dpi))\n backend_kwargs.setdefault(\"height\", int(figsize[1] / (numvars - 1) * dpi))\n for row in range(numvars - 1):\n row_ax = []\n for col in range(numvars - 1):\n if row < col:\n row_ax.append(None)\n else:\n ax_ = bkp.figure(**backend_kwargs)\n row_ax.append(ax_)\n ax.append(row_ax)\n ax = np.array(ax)\n\n tmp_flat_var_names = None\n if len(flat_var_names) == len(list(set(flat_var_names))):\n source_dict = dict(zip(flat_var_names, [list(post) for post in infdata_group]))\n else:\n tmp_flat_var_names = [\"{}__{}\".format(name, str(uuid4())) for name in 
flat_var_names]\n source_dict = dict(zip(tmp_flat_var_names, [list(post) for post in infdata_group]))\n if divergences:\n divergenve_name = \"divergences_{}\".format(str(uuid4()))\n source_dict[divergenve_name] = (\n np.array(diverging_mask).astype(bool).astype(int).astype(str)\n )\n\n source = ColumnDataSource(data=source_dict)\n\n if divergences:\n source_nondiv = CDSView(\n source=source, filters=[GroupFilter(column_name=divergenve_name, group=\"0\")]\n )\n source_div = CDSView(\n source=source, filters=[GroupFilter(column_name=divergenve_name, group=\"1\")]\n )\n\n for i in range(0, numvars - 1):\n var1 = flat_var_names[i] if tmp_flat_var_names is None else tmp_flat_var_names[i]\n\n for j in range(0, numvars - 1):\n if j < i:\n continue\n\n var2 = (\n flat_var_names[j + 1]\n if tmp_flat_var_names is None\n else tmp_flat_var_names[j + 1]\n )\n\n if kind == \"scatter\":\n if divergences:\n ax[j, i].circle(var1, var2, source=source, view=source_nondiv)\n else:\n ax[j, i].circle(var1, var2, source=source)\n\n elif kind == \"kde\":\n var1_kde = infdata_group[i]\n var2_kde = infdata_group[j + 1]\n plot_kde(\n var1_kde,\n var2_kde,\n contour=contour,\n fill_last=fill_last,\n ax=ax[j, i],\n backend=\"bokeh\",\n backend_kwargs={},\n show=False,\n **plot_kwargs\n )\n\n else:\n var1_hexbin = infdata_group[i]\n var2_hexbin = infdata_group[j + 1]\n ax[j, i].grid.visible = False\n ax[j, i].hexbin(var1_hexbin, var2_hexbin, size=0.5)\n\n if divergences:\n ax[j, i].circle(\n var1,\n var2,\n line_color=\"black\",\n fill_color=\"orange\",\n line_width=1,\n size=10,\n source=source,\n view=source_div,\n )\n\n ax[j, i].xaxis.axis_label = flat_var_names[i]\n ax[j, i].yaxis.axis_label = flat_var_names[j + 1]\n\n show_layout(ax, show)\n\n return ax\n", "path": "arviz/plots/backends/bokeh/pairplot.py"}]} | 2,849 | 827 |
gh_patches_debug_40783 | rasdani/github-patches | git_diff | iterative__dvc-4848 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
dvc version: show external cache and remotes being used
This would have made my life easier when investigating https://github.com/iterative/dvc/pull/4570.
Another consideration is that `dvc version` can only show a limited amount of information. Should there be `dvc version --json`?
</issue>
<code>
[start of dvc/tree/__init__.py]
1 import posixpath
2 from urllib.parse import urlparse
3
4 from .azure import AzureTree
5 from .gdrive import GDriveTree
6 from .gs import GSTree
7 from .hdfs import HDFSTree
8 from .http import HTTPTree
9 from .https import HTTPSTree
10 from .local import LocalTree
11 from .oss import OSSTree
12 from .s3 import S3Tree
13 from .ssh import SSHTree
14 from .webdav import WebDAVTree
15 from .webdavs import WebDAVSTree
16 from .webhdfs import WebHDFSTree
17
18 TREES = [
19 AzureTree,
20 GDriveTree,
21 GSTree,
22 HDFSTree,
23 HTTPTree,
24 HTTPSTree,
25 S3Tree,
26 SSHTree,
27 OSSTree,
28 WebDAVTree,
29 WebDAVSTree,
30 WebHDFSTree
31 # NOTE: LocalTree is the default
32 ]
33
34
35 def _get_tree(remote_conf):
36 for tree_cls in TREES:
37 if tree_cls.supported(remote_conf):
38 return tree_cls
39 return LocalTree
40
41
42 def _get_conf(repo, **kwargs):
43 name = kwargs.get("name")
44 if name:
45 remote_conf = repo.config["remote"][name.lower()]
46 else:
47 remote_conf = kwargs
48 return _resolve_remote_refs(repo, remote_conf)
49
50
51 def _resolve_remote_refs(repo, remote_conf):
52 # Support for cross referenced remotes.
53 # This will merge the settings, shadowing base ref with remote_conf.
54 # For example, having:
55 #
56 # dvc remote add server ssh://localhost
57 # dvc remote modify server user root
58 # dvc remote modify server ask_password true
59 #
60 # dvc remote add images remote://server/tmp/pictures
61 # dvc remote modify images user alice
62 # dvc remote modify images ask_password false
63 # dvc remote modify images password asdf1234
64 #
65 # Results on a config dictionary like:
66 #
67 # {
68 # "url": "ssh://localhost/tmp/pictures",
69 # "user": "alice",
70 # "password": "asdf1234",
71 # "ask_password": False,
72 # }
73 parsed = urlparse(remote_conf["url"])
74 if parsed.scheme != "remote":
75 return remote_conf
76
77 base = _get_conf(repo, name=parsed.netloc)
78 url = posixpath.join(base["url"], parsed.path.lstrip("/"))
79 return {**base, **remote_conf, "url": url}
80
81
82 def get_cloud_tree(repo, **kwargs):
83 from dvc.config import SCHEMA, ConfigError, Invalid
84
85 remote_conf = _get_conf(repo, **kwargs)
86 try:
87 remote_conf = SCHEMA["remote"][str](remote_conf)
88 except Invalid as exc:
89 raise ConfigError(str(exc)) from None
90 return _get_tree(remote_conf)(repo, remote_conf)
91
[end of dvc/tree/__init__.py]
[start of dvc/info.py]
1 import itertools
2 import os
3 import pathlib
4 import platform
5 import uuid
6
7 from dvc.exceptions import DvcException, NotDvcRepoError
8 from dvc.repo import Repo
9 from dvc.scm.base import SCMError
10 from dvc.system import System
11 from dvc.tree import TREES
12 from dvc.utils import error_link
13 from dvc.utils.pkg import PKG
14 from dvc.version import __version__
15
16 try:
17 import psutil
18 except ImportError:
19 psutil = None
20
21 if PKG is None:
22 package = ""
23 else:
24 package = f"({PKG})"
25
26
27 def get_dvc_info():
28 info = [
29 f"DVC version: {__version__} {package}",
30 "---------------------------------",
31 f"Platform: Python {platform.python_version()} on "
32 f"{platform.platform()}",
33 f"Supports: {_get_supported_remotes()}",
34 ]
35
36 try:
37 repo = Repo()
38
39 # cache_dir might not exist yet (e.g. after `dvc init`), and we
40 # can't auto-create it, as it might cause issues if the user
41 # later decides to enable shared cache mode with
42 # `dvc config cache.shared group`.
43 if os.path.exists(repo.cache.local.cache_dir):
44 info.append(
45 "Cache types: {}".format(_get_linktype_support_info(repo))
46 )
47 if psutil:
48 fs_type = get_fs_type(repo.cache.local.cache_dir)
49 info.append(f"Cache directory: {fs_type}")
50 else:
51 info.append("Cache types: " + error_link("no-dvc-cache"))
52
53 except NotDvcRepoError:
54 pass
55 except SCMError:
56 info.append("Repo: dvc, git (broken)")
57 else:
58 root_directory = repo.root_dir
59 if psutil:
60 fs_root = get_fs_type(os.path.abspath(root_directory))
61 info.append(f"Workspace directory: {fs_root}")
62 info.append("Repo: {}".format(_get_dvc_repo_info(repo)))
63 return "\n".join(info)
64
65
66 def _get_linktype_support_info(repo):
67
68 links = {
69 "reflink": (System.reflink, None),
70 "hardlink": (System.hardlink, System.is_hardlink),
71 "symlink": (System.symlink, System.is_symlink),
72 }
73
74 fname = "." + str(uuid.uuid4())
75 src = os.path.join(repo.cache.local.cache_dir, fname)
76 open(src, "w").close()
77 dst = os.path.join(repo.root_dir, fname)
78
79 cache = []
80
81 for name, (link, is_link) in links.items():
82 try:
83 link(src, dst)
84 status = "supported"
85 if is_link and not is_link(dst):
86 status = "broken"
87 os.unlink(dst)
88 except DvcException:
89 status = "not supported"
90
91 if status == "supported":
92 cache.append(name)
93 os.remove(src)
94
95 return ", ".join(cache)
96
97
98 def _get_supported_remotes():
99
100 supported_remotes = []
101 for tree_cls in TREES:
102 if not tree_cls.get_missing_deps():
103 supported_remotes.append(tree_cls.scheme)
104
105 if len(supported_remotes) == len(TREES):
106 return "All remotes"
107
108 if len(supported_remotes) == 1:
109 return supported_remotes
110
111 return ", ".join(supported_remotes)
112
113
114 def get_fs_type(path):
115
116 partition = {
117 pathlib.Path(part.mountpoint): (part.fstype + " on " + part.device)
118 for part in psutil.disk_partitions(all=True)
119 }
120
121 path = pathlib.Path(path)
122
123 for parent in itertools.chain([path], path.parents):
124 if parent in partition:
125 return partition[parent]
126 return ("unknown", "none")
127
128
129 def _get_dvc_repo_info(self):
130 if self.config.get("core", {}).get("no_scm", False):
131 return "dvc (no_scm)"
132
133 if self.root_dir != self.scm.root_dir:
134 return "dvc (subdir), git"
135
136 return "dvc, git"
137
[end of dvc/info.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dvc/info.py b/dvc/info.py
--- a/dvc/info.py
+++ b/dvc/info.py
@@ -8,7 +8,7 @@
from dvc.repo import Repo
from dvc.scm.base import SCMError
from dvc.system import System
-from dvc.tree import TREES
+from dvc.tree import TREES, get_tree_cls, get_tree_config
from dvc.utils import error_link
from dvc.utils.pkg import PKG
from dvc.version import __version__
@@ -50,6 +50,10 @@
else:
info.append("Cache types: " + error_link("no-dvc-cache"))
+ info.append(f"Caches: {_get_caches(repo.cache)}")
+
+ info.append(f"Remotes: {_get_remotes(repo.config)}")
+
except NotDvcRepoError:
pass
except SCMError:
@@ -63,6 +67,26 @@
return "\n".join(info)
+def _get_caches(cache):
+ caches = (
+ cache_type
+ for cache_type, cache_instance in cache.by_scheme()
+ if cache_instance
+ )
+
+ # Caches will be always non-empty including the local cache
+ return ", ".join(caches)
+
+
+def _get_remotes(config):
+ schemes = (
+ get_tree_cls(get_tree_config(config, name=remote)).scheme
+ for remote in config["remote"]
+ )
+
+ return ", ".join(schemes) or "None"
+
+
def _get_linktype_support_info(repo):
links = {
diff --git a/dvc/tree/__init__.py b/dvc/tree/__init__.py
--- a/dvc/tree/__init__.py
+++ b/dvc/tree/__init__.py
@@ -32,23 +32,23 @@
]
-def _get_tree(remote_conf):
+def get_tree_cls(remote_conf):
for tree_cls in TREES:
if tree_cls.supported(remote_conf):
return tree_cls
return LocalTree
-def _get_conf(repo, **kwargs):
+def get_tree_config(config, **kwargs):
name = kwargs.get("name")
if name:
- remote_conf = repo.config["remote"][name.lower()]
+ remote_conf = config["remote"][name.lower()]
else:
remote_conf = kwargs
- return _resolve_remote_refs(repo, remote_conf)
+ return _resolve_remote_refs(config, remote_conf)
-def _resolve_remote_refs(repo, remote_conf):
+def _resolve_remote_refs(config, remote_conf):
# Support for cross referenced remotes.
# This will merge the settings, shadowing base ref with remote_conf.
# For example, having:
@@ -74,7 +74,7 @@
if parsed.scheme != "remote":
return remote_conf
- base = _get_conf(repo, name=parsed.netloc)
+ base = get_tree_config(config, name=parsed.netloc)
url = posixpath.join(base["url"], parsed.path.lstrip("/"))
return {**base, **remote_conf, "url": url}
@@ -82,9 +82,9 @@
def get_cloud_tree(repo, **kwargs):
from dvc.config import SCHEMA, ConfigError, Invalid
- remote_conf = _get_conf(repo, **kwargs)
+ remote_conf = get_tree_config(repo.config, **kwargs)
try:
remote_conf = SCHEMA["remote"][str](remote_conf)
except Invalid as exc:
raise ConfigError(str(exc)) from None
- return _get_tree(remote_conf)(repo, remote_conf)
+ return get_tree_cls(remote_conf)(repo, remote_conf)
| {"golden_diff": "diff --git a/dvc/info.py b/dvc/info.py\n--- a/dvc/info.py\n+++ b/dvc/info.py\n@@ -8,7 +8,7 @@\n from dvc.repo import Repo\n from dvc.scm.base import SCMError\n from dvc.system import System\n-from dvc.tree import TREES\n+from dvc.tree import TREES, get_tree_cls, get_tree_config\n from dvc.utils import error_link\n from dvc.utils.pkg import PKG\n from dvc.version import __version__\n@@ -50,6 +50,10 @@\n else:\n info.append(\"Cache types: \" + error_link(\"no-dvc-cache\"))\n \n+ info.append(f\"Caches: {_get_caches(repo.cache)}\")\n+\n+ info.append(f\"Remotes: {_get_remotes(repo.config)}\")\n+\n except NotDvcRepoError:\n pass\n except SCMError:\n@@ -63,6 +67,26 @@\n return \"\\n\".join(info)\n \n \n+def _get_caches(cache):\n+ caches = (\n+ cache_type\n+ for cache_type, cache_instance in cache.by_scheme()\n+ if cache_instance\n+ )\n+\n+ # Caches will be always non-empty including the local cache\n+ return \", \".join(caches)\n+\n+\n+def _get_remotes(config):\n+ schemes = (\n+ get_tree_cls(get_tree_config(config, name=remote)).scheme\n+ for remote in config[\"remote\"]\n+ )\n+\n+ return \", \".join(schemes) or \"None\"\n+\n+\n def _get_linktype_support_info(repo):\n \n links = {\ndiff --git a/dvc/tree/__init__.py b/dvc/tree/__init__.py\n--- a/dvc/tree/__init__.py\n+++ b/dvc/tree/__init__.py\n@@ -32,23 +32,23 @@\n ]\n \n \n-def _get_tree(remote_conf):\n+def get_tree_cls(remote_conf):\n for tree_cls in TREES:\n if tree_cls.supported(remote_conf):\n return tree_cls\n return LocalTree\n \n \n-def _get_conf(repo, **kwargs):\n+def get_tree_config(config, **kwargs):\n name = kwargs.get(\"name\")\n if name:\n- remote_conf = repo.config[\"remote\"][name.lower()]\n+ remote_conf = config[\"remote\"][name.lower()]\n else:\n remote_conf = kwargs\n- return _resolve_remote_refs(repo, remote_conf)\n+ return _resolve_remote_refs(config, remote_conf)\n \n \n-def _resolve_remote_refs(repo, remote_conf):\n+def _resolve_remote_refs(config, remote_conf):\n # Support for cross referenced remotes.\n # This will merge the settings, shadowing base ref with remote_conf.\n # For example, having:\n@@ -74,7 +74,7 @@\n if parsed.scheme != \"remote\":\n return remote_conf\n \n- base = _get_conf(repo, name=parsed.netloc)\n+ base = get_tree_config(config, name=parsed.netloc)\n url = posixpath.join(base[\"url\"], parsed.path.lstrip(\"/\"))\n return {**base, **remote_conf, \"url\": url}\n \n@@ -82,9 +82,9 @@\n def get_cloud_tree(repo, **kwargs):\n from dvc.config import SCHEMA, ConfigError, Invalid\n \n- remote_conf = _get_conf(repo, **kwargs)\n+ remote_conf = get_tree_config(repo.config, **kwargs)\n try:\n remote_conf = SCHEMA[\"remote\"][str](remote_conf)\n except Invalid as exc:\n raise ConfigError(str(exc)) from None\n- return _get_tree(remote_conf)(repo, remote_conf)\n+ return get_tree_cls(remote_conf)(repo, remote_conf)\n", "issue": "dvc version: show external cache and remotes being used\nThis would have made my life easier when investigating https://github.com/iterative/dvc/pull/4570.\r\n\r\nAnother question to ask is that `dvc version` can only have a limited amount of information. 
Should there be `dvc version --json`?\r\n\r\n\n", "before_files": [{"content": "import posixpath\nfrom urllib.parse import urlparse\n\nfrom .azure import AzureTree\nfrom .gdrive import GDriveTree\nfrom .gs import GSTree\nfrom .hdfs import HDFSTree\nfrom .http import HTTPTree\nfrom .https import HTTPSTree\nfrom .local import LocalTree\nfrom .oss import OSSTree\nfrom .s3 import S3Tree\nfrom .ssh import SSHTree\nfrom .webdav import WebDAVTree\nfrom .webdavs import WebDAVSTree\nfrom .webhdfs import WebHDFSTree\n\nTREES = [\n AzureTree,\n GDriveTree,\n GSTree,\n HDFSTree,\n HTTPTree,\n HTTPSTree,\n S3Tree,\n SSHTree,\n OSSTree,\n WebDAVTree,\n WebDAVSTree,\n WebHDFSTree\n # NOTE: LocalTree is the default\n]\n\n\ndef _get_tree(remote_conf):\n for tree_cls in TREES:\n if tree_cls.supported(remote_conf):\n return tree_cls\n return LocalTree\n\n\ndef _get_conf(repo, **kwargs):\n name = kwargs.get(\"name\")\n if name:\n remote_conf = repo.config[\"remote\"][name.lower()]\n else:\n remote_conf = kwargs\n return _resolve_remote_refs(repo, remote_conf)\n\n\ndef _resolve_remote_refs(repo, remote_conf):\n # Support for cross referenced remotes.\n # This will merge the settings, shadowing base ref with remote_conf.\n # For example, having:\n #\n # dvc remote add server ssh://localhost\n # dvc remote modify server user root\n # dvc remote modify server ask_password true\n #\n # dvc remote add images remote://server/tmp/pictures\n # dvc remote modify images user alice\n # dvc remote modify images ask_password false\n # dvc remote modify images password asdf1234\n #\n # Results on a config dictionary like:\n #\n # {\n # \"url\": \"ssh://localhost/tmp/pictures\",\n # \"user\": \"alice\",\n # \"password\": \"asdf1234\",\n # \"ask_password\": False,\n # }\n parsed = urlparse(remote_conf[\"url\"])\n if parsed.scheme != \"remote\":\n return remote_conf\n\n base = _get_conf(repo, name=parsed.netloc)\n url = posixpath.join(base[\"url\"], parsed.path.lstrip(\"/\"))\n return {**base, **remote_conf, \"url\": url}\n\n\ndef get_cloud_tree(repo, **kwargs):\n from dvc.config import SCHEMA, ConfigError, Invalid\n\n remote_conf = _get_conf(repo, **kwargs)\n try:\n remote_conf = SCHEMA[\"remote\"][str](remote_conf)\n except Invalid as exc:\n raise ConfigError(str(exc)) from None\n return _get_tree(remote_conf)(repo, remote_conf)\n", "path": "dvc/tree/__init__.py"}, {"content": "import itertools\nimport os\nimport pathlib\nimport platform\nimport uuid\n\nfrom dvc.exceptions import DvcException, NotDvcRepoError\nfrom dvc.repo import Repo\nfrom dvc.scm.base import SCMError\nfrom dvc.system import System\nfrom dvc.tree import TREES\nfrom dvc.utils import error_link\nfrom dvc.utils.pkg import PKG\nfrom dvc.version import __version__\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\nif PKG is None:\n package = \"\"\nelse:\n package = f\"({PKG})\"\n\n\ndef get_dvc_info():\n info = [\n f\"DVC version: {__version__} {package}\",\n \"---------------------------------\",\n f\"Platform: Python {platform.python_version()} on \"\n f\"{platform.platform()}\",\n f\"Supports: {_get_supported_remotes()}\",\n ]\n\n try:\n repo = Repo()\n\n # cache_dir might not exist yet (e.g. 
after `dvc init`), and we\n # can't auto-create it, as it might cause issues if the user\n # later decides to enable shared cache mode with\n # `dvc config cache.shared group`.\n if os.path.exists(repo.cache.local.cache_dir):\n info.append(\n \"Cache types: {}\".format(_get_linktype_support_info(repo))\n )\n if psutil:\n fs_type = get_fs_type(repo.cache.local.cache_dir)\n info.append(f\"Cache directory: {fs_type}\")\n else:\n info.append(\"Cache types: \" + error_link(\"no-dvc-cache\"))\n\n except NotDvcRepoError:\n pass\n except SCMError:\n info.append(\"Repo: dvc, git (broken)\")\n else:\n root_directory = repo.root_dir\n if psutil:\n fs_root = get_fs_type(os.path.abspath(root_directory))\n info.append(f\"Workspace directory: {fs_root}\")\n info.append(\"Repo: {}\".format(_get_dvc_repo_info(repo)))\n return \"\\n\".join(info)\n\n\ndef _get_linktype_support_info(repo):\n\n links = {\n \"reflink\": (System.reflink, None),\n \"hardlink\": (System.hardlink, System.is_hardlink),\n \"symlink\": (System.symlink, System.is_symlink),\n }\n\n fname = \".\" + str(uuid.uuid4())\n src = os.path.join(repo.cache.local.cache_dir, fname)\n open(src, \"w\").close()\n dst = os.path.join(repo.root_dir, fname)\n\n cache = []\n\n for name, (link, is_link) in links.items():\n try:\n link(src, dst)\n status = \"supported\"\n if is_link and not is_link(dst):\n status = \"broken\"\n os.unlink(dst)\n except DvcException:\n status = \"not supported\"\n\n if status == \"supported\":\n cache.append(name)\n os.remove(src)\n\n return \", \".join(cache)\n\n\ndef _get_supported_remotes():\n\n supported_remotes = []\n for tree_cls in TREES:\n if not tree_cls.get_missing_deps():\n supported_remotes.append(tree_cls.scheme)\n\n if len(supported_remotes) == len(TREES):\n return \"All remotes\"\n\n if len(supported_remotes) == 1:\n return supported_remotes\n\n return \", \".join(supported_remotes)\n\n\ndef get_fs_type(path):\n\n partition = {\n pathlib.Path(part.mountpoint): (part.fstype + \" on \" + part.device)\n for part in psutil.disk_partitions(all=True)\n }\n\n path = pathlib.Path(path)\n\n for parent in itertools.chain([path], path.parents):\n if parent in partition:\n return partition[parent]\n return (\"unknown\", \"none\")\n\n\ndef _get_dvc_repo_info(self):\n if self.config.get(\"core\", {}).get(\"no_scm\", False):\n return \"dvc (no_scm)\"\n\n if self.root_dir != self.scm.root_dir:\n return \"dvc (subdir), git\"\n\n return \"dvc, git\"\n", "path": "dvc/info.py"}]} | 2,646 | 809 |
gh_patches_debug_51335 | rasdani/github-patches | git_diff | beetbox__beets-1650 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plexupdate plugin crashed
Every time after an import, the plexupdate plugin crashes with this error:
```
Traceback (most recent call last):
File "/usr/local/bin/beet", line 9, in <module>
load_entry_point('beets==1.3.15', 'console_scripts', 'beet')()
File "/usr/local/lib/python2.7/dist-packages/beets/ui/__init__.py", line 1163, in main
_raw_main(args)
File "/usr/local/lib/python2.7/dist-packages/beets/ui/__init__.py", line 1155, in _raw_main
plugins.send('cli_exit', lib=lib)
File "/usr/local/lib/python2.7/dist-packages/beets/plugins.py", line 458, in send
result = handler(**arguments)
File "/usr/local/lib/python2.7/dist-packages/beets/plugins.py", line 123, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/beetsplug/plexupdate.py", line 87, in update
config['plex']['library_name'].get())
File "/usr/local/lib/python2.7/dist-packages/beetsplug/plexupdate.py", line 41, in update_plex
section_key = get_music_section(host, port, token, library_name)
File "/usr/local/lib/python2.7/dist-packages/beetsplug/plexupdate.py", line 31, in get_music_section
tree = ET.fromstring(r.raw)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1300, in XML
parser.feed(text)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1640, in feed
self._parser.Parse(data, 0)
TypeError: must be string or read-only buffer, not HTTPResponse
```
</issue>
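For background on the `TypeError` above: `xml.etree.ElementTree.fromstring` accepts a string or bytes, while `requests`' `Response.raw` is a file-like urllib3 `HTTPResponse` object. A small hedged sketch of the three accessors involved follows; the URL is a placeholder, not a real Plex endpoint.

```python
# Illustration of requests' response accessors; not the plugin's actual code.
import xml.etree.ElementTree as ET
import requests

r = requests.get("http://localhost:32400/library/sections")
r.raw       # file-like urllib3 HTTPResponse -> rejected by ET.fromstring
r.text      # decoded str                    -> accepted
r.content   # raw bytes                      -> accepted, no decoding step
tree = ET.fromstring(r.content)
```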
<code>
[start of beetsplug/plexupdate.py]
1 """Updates an Plex library whenever the beets library is changed.
2
3 Plex Home users enter the Plex Token to enable updating.
4 Put something like the following in your config.yaml to configure:
5 plex:
6 host: localhost
7 port: 32400
8 token: token
9 """
10 from __future__ import (division, absolute_import, print_function,
11 unicode_literals)
12
13 import requests
14 from urlparse import urljoin
15 from urllib import urlencode
16 import xml.etree.ElementTree as ET
17 from beets import config
18 from beets.plugins import BeetsPlugin
19
20
21 def get_music_section(host, port, token, library_name):
22 """Getting the section key for the music library in Plex.
23 """
24 api_endpoint = append_token('library/sections', token)
25 url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)
26
27 # Sends request.
28 r = requests.get(url)
29
30 # Parse xml tree and extract music section key.
31 tree = ET.fromstring(r.text)
32 for child in tree.findall('Directory'):
33 if child.get('title') == library_name:
34 return child.get('key')
35
36
37 def update_plex(host, port, token, library_name):
38 """Sends request to the Plex api to start a library refresh.
39 """
40 # Getting section key and build url.
41 section_key = get_music_section(host, port, token, library_name)
42 api_endpoint = 'library/sections/{0}/refresh'.format(section_key)
43 api_endpoint = append_token(api_endpoint, token)
44 url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)
45
46 # Sends request and returns requests object.
47 r = requests.get(url)
48 return r
49
50
51 def append_token(url, token):
52 """Appends the Plex Home token to the api call if required.
53 """
54 if token:
55 url += '?' + urlencode({'X-Plex-Token': token})
56 return url
57
58
59 class PlexUpdate(BeetsPlugin):
60 def __init__(self):
61 super(PlexUpdate, self).__init__()
62
63 # Adding defaults.
64 config['plex'].add({
65 u'host': u'localhost',
66 u'port': 32400,
67 u'token': u'',
68 u'library_name': u'Music'})
69
70 self.register_listener('database_change', self.listen_for_db_change)
71
72 def listen_for_db_change(self, lib, model):
73 """Listens for beets db change and register the update for the end"""
74 self.register_listener('cli_exit', self.update)
75
76 def update(self, lib):
77 """When the client exists try to send refresh request to Plex server.
78 """
79 self._log.info('Updating Plex library...')
80
81 # Try to send update request.
82 try:
83 update_plex(
84 config['plex']['host'].get(),
85 config['plex']['port'].get(),
86 config['plex']['token'].get(),
87 config['plex']['library_name'].get())
88 self._log.info('... started.')
89
90 except requests.exceptions.RequestException:
91 self._log.warning('Update failed.')
92
[end of beetsplug/plexupdate.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/beetsplug/plexupdate.py b/beetsplug/plexupdate.py
--- a/beetsplug/plexupdate.py
+++ b/beetsplug/plexupdate.py
@@ -28,7 +28,7 @@
r = requests.get(url)
# Parse xml tree and extract music section key.
- tree = ET.fromstring(r.text)
+ tree = ET.fromstring(r.content)
for child in tree.findall('Directory'):
if child.get('title') == library_name:
return child.get('key')
| {"golden_diff": "diff --git a/beetsplug/plexupdate.py b/beetsplug/plexupdate.py\n--- a/beetsplug/plexupdate.py\n+++ b/beetsplug/plexupdate.py\n@@ -28,7 +28,7 @@\n r = requests.get(url)\n \n # Parse xml tree and extract music section key.\n- tree = ET.fromstring(r.text)\n+ tree = ET.fromstring(r.content)\n for child in tree.findall('Directory'):\n if child.get('title') == library_name:\n return child.get('key')\n", "issue": "plexupdate plugin crashed\nEvery time after import plexupdate plugin crashed with this error:\n\n```\nTraceback (most recent call last):\n File \"/usr/local/bin/beet\", line 9, in <module>\n load_entry_point('beets==1.3.15', 'console_scripts', 'beet')()\n File \"/usr/local/lib/python2.7/dist-packages/beets/ui/__init__.py\", line 1163, in main\n _raw_main(args)\n File \"/usr/local/lib/python2.7/dist-packages/beets/ui/__init__.py\", line 1155, in _raw_main\n plugins.send('cli_exit', lib=lib)\n File \"/usr/local/lib/python2.7/dist-packages/beets/plugins.py\", line 458, in send\n result = handler(**arguments)\n File \"/usr/local/lib/python2.7/dist-packages/beets/plugins.py\", line 123, in wrapper\n return func(*args, **kwargs)\n File \"/usr/local/lib/python2.7/dist-packages/beetsplug/plexupdate.py\", line 87, in update\n config['plex']['library_name'].get())\n File \"/usr/local/lib/python2.7/dist-packages/beetsplug/plexupdate.py\", line 41, in update_plex\n section_key = get_music_section(host, port, token, library_name)\n File \"/usr/local/lib/python2.7/dist-packages/beetsplug/plexupdate.py\", line 31, in get_music_section\n tree = ET.fromstring(r.raw)\n File \"/usr/lib/python2.7/xml/etree/ElementTree.py\", line 1300, in XML\n parser.feed(text)\n File \"/usr/lib/python2.7/xml/etree/ElementTree.py\", line 1640, in feed\n self._parser.Parse(data, 0)\nTypeError: must be string or read-only buffer, not HTTPResponse\n```\n\n", "before_files": [{"content": "\"\"\"Updates an Plex library whenever the beets library is changed.\n\nPlex Home users enter the Plex Token to enable updating.\nPut something like the following in your config.yaml to configure:\n plex:\n host: localhost\n port: 32400\n token: token\n\"\"\"\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\nimport requests\nfrom urlparse import urljoin\nfrom urllib import urlencode\nimport xml.etree.ElementTree as ET\nfrom beets import config\nfrom beets.plugins import BeetsPlugin\n\n\ndef get_music_section(host, port, token, library_name):\n \"\"\"Getting the section key for the music library in Plex.\n \"\"\"\n api_endpoint = append_token('library/sections', token)\n url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)\n\n # Sends request.\n r = requests.get(url)\n\n # Parse xml tree and extract music section key.\n tree = ET.fromstring(r.text)\n for child in tree.findall('Directory'):\n if child.get('title') == library_name:\n return child.get('key')\n\n\ndef update_plex(host, port, token, library_name):\n \"\"\"Sends request to the Plex api to start a library refresh.\n \"\"\"\n # Getting section key and build url.\n section_key = get_music_section(host, port, token, library_name)\n api_endpoint = 'library/sections/{0}/refresh'.format(section_key)\n api_endpoint = append_token(api_endpoint, token)\n url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)\n\n # Sends request and returns requests object.\n r = requests.get(url)\n return r\n\n\ndef append_token(url, token):\n \"\"\"Appends the Plex Home token to the api call if required.\n \"\"\"\n if token:\n 
url += '?' + urlencode({'X-Plex-Token': token})\n return url\n\n\nclass PlexUpdate(BeetsPlugin):\n def __init__(self):\n super(PlexUpdate, self).__init__()\n\n # Adding defaults.\n config['plex'].add({\n u'host': u'localhost',\n u'port': 32400,\n u'token': u'',\n u'library_name': u'Music'})\n\n self.register_listener('database_change', self.listen_for_db_change)\n\n def listen_for_db_change(self, lib, model):\n \"\"\"Listens for beets db change and register the update for the end\"\"\"\n self.register_listener('cli_exit', self.update)\n\n def update(self, lib):\n \"\"\"When the client exists try to send refresh request to Plex server.\n \"\"\"\n self._log.info('Updating Plex library...')\n\n # Try to send update request.\n try:\n update_plex(\n config['plex']['host'].get(),\n config['plex']['port'].get(),\n config['plex']['token'].get(),\n config['plex']['library_name'].get())\n self._log.info('... started.')\n\n except requests.exceptions.RequestException:\n self._log.warning('Update failed.')\n", "path": "beetsplug/plexupdate.py"}]} | 1,806 | 118 |
gh_patches_debug_34669 | rasdani/github-patches | git_diff | PlasmaPy__PlasmaPy-125 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Raise ImportError when installing/importing with old versions of Python, Astropy, & NumPy
We decided that PlasmaPy will only be supported for:
- Python version > 3.6
- Astropy version > 2.0
- NumPy version > 1.13
However, when I try to run:
```ShellSession
python setup.py install
```
from the command line with Python 3.5 then I get a `SyntaxError` for syntax that is new in version 3.6.
When I try to run
```Python
import plasmapy
```
in Python 3.6 with Astropy 1.3.1, then I get an exception since one of the constants imported from Astropy was renamed in 2.0.
We should raise an appropriate exception (probably an `ImportError`) when we try to install or import PlasmaPy with any of the unsupported versions above. We should also have appropriate and useful error messages for any of these situations. The pull request to close this issue would involve editing `setup.py`, `requirements/base.txt`, and `plasmapy/__init__.py`.
Thank you!
Nick
</issue>
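For the install-time part mentioned above (editing `setup.py`), a minimal hedged sketch of how such a guard is usually expressed with setuptools; the fragment is hypothetical and the version bounds simply mirror the minimums named in the issue.

```python
# Hypothetical setup.py fragment; real PlasmaPy metadata omitted.
from setuptools import setup, find_packages

setup(
    name="plasmapy",
    packages=find_packages(),
    python_requires=">=3.6",      # rejects Python 3.5 at install time
    install_requires=[
        "numpy>=1.13",
        "astropy>=2.0",
    ],
)
```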
<code>
[start of plasmapy/__init__.py]
1 from ._metadata import (
2 name as __name__,
3 version as __version__,
4 description as __doc__,
5 author as __author__,
6 )
7
8 from .classes import Plasma
9 from . import classes
10 from . import constants
11 from . import atomic
12 from . import math
13 from . import physics
14 from . import utils
15
16 import sys
17 import warnings
18
19 if sys.version_info[:2] < (3, 6): # coveralls: ignore
20 warnings.warn("PlasmaPy does not support Python 3.5 and below")
21
[end of plasmapy/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plasmapy/__init__.py b/plasmapy/__init__.py
--- a/plasmapy/__init__.py
+++ b/plasmapy/__init__.py
@@ -5,16 +5,81 @@
author as __author__,
)
-from .classes import Plasma
-from . import classes
-from . import constants
-from . import atomic
-from . import math
-from . import physics
-from . import utils
-
import sys
import warnings
-if sys.version_info[:2] < (3, 6): # coveralls: ignore
+__minimum_python_version__ = '3.6'
+__minimum_numpy_version__ = '1.13.0'
+__minimum_astropy_version__ = '2.0.0'
+
+
+def _split_version(version):
+ return tuple(int(ver) for ver in version.split('.'))
+
+
+def _min_required_version(required, current): # coveralls: ignore
+ """ Return `True` if the current version meets the required minimum
+ version and `False` if not/ if not installed.
+
+ Right now `required` and `current` are just '.' separated strings
+ but it would be good to make this more general and accept modules.
+ """
+ return _split_version(current) >= _split_version(required)
+
+
+def _check_numpy_version(): # coveralls: ignore
+ """ Make sure numpy in installed and meets the minimum version requirements
+ """
+ required_version = False
+ np_ver = None
+
+ try:
+ from numpy import __version__ as np_ver
+ required_version = _min_required_version(__minimum_numpy_version__,
+ np_ver)
+ except ImportError:
+ pass
+
+ if not required_version:
+ ver_error = ("Numpy {} or above is required for PlasmaPy. The "
+ "currently installed version is {}"
+ ).format(__minimum_numpy_version__, np_ver)
+ raise ImportError(ver_error)
+
+
+def _check_astropy_version(): # coveralls: ignore
+ """ Make sure astropy in installed and meets the minimum version requirements
+ """
+ required_version = False
+ ap_ver = None
+
+ try:
+ from astropy import __version__ as ap_ver
+ required_version = _min_required_version(__minimum_astropy_version__,
+ ap_ver)
+ except ImportError:
+ pass
+
+ if not required_version:
+ ver_error = ("Astropy {} or above is required for PlasmaPy. The "
+ "currently installed version is {}"
+ ).format(__minimum_astropy_version__, ap_ver)
+ raise ImportError(ver_error)
+
+
+if (sys.version_info < _split_version(__minimum_python_version__)): # coveralls: ignore
warnings.warn("PlasmaPy does not support Python 3.5 and below")
+
+_check_numpy_version()
+_check_astropy_version()
+
+try:
+ from .classes import Plasma
+ from . import classes
+ from . import constants
+ from . import atomic
+ from . import math
+ from . import physics
+ from . import utils
+except Exception:
+ raise ImportError("Unable to load PlasmaPy subpackages.")
| {"golden_diff": "diff --git a/plasmapy/__init__.py b/plasmapy/__init__.py\n--- a/plasmapy/__init__.py\n+++ b/plasmapy/__init__.py\n@@ -5,16 +5,81 @@\n author as __author__,\n )\n \n-from .classes import Plasma\n-from . import classes\n-from . import constants\n-from . import atomic\n-from . import math\n-from . import physics\n-from . import utils\n-\n import sys\n import warnings\n \n-if sys.version_info[:2] < (3, 6): # coveralls: ignore\n+__minimum_python_version__ = '3.6'\n+__minimum_numpy_version__ = '1.13.0'\n+__minimum_astropy_version__ = '2.0.0'\n+\n+\n+def _split_version(version):\n+ return tuple(int(ver) for ver in version.split('.'))\n+\n+\n+def _min_required_version(required, current): # coveralls: ignore\n+ \"\"\" Return `True` if the current version meets the required minimum\n+ version and `False` if not/ if not installed.\n+\n+ Right now `required` and `current` are just '.' separated strings\n+ but it would be good to make this more general and accept modules.\n+ \"\"\"\n+ return _split_version(current) >= _split_version(required)\n+\n+\n+def _check_numpy_version(): # coveralls: ignore\n+ \"\"\" Make sure numpy in installed and meets the minimum version requirements\n+ \"\"\"\n+ required_version = False\n+ np_ver = None\n+\n+ try:\n+ from numpy import __version__ as np_ver\n+ required_version = _min_required_version(__minimum_numpy_version__,\n+ np_ver)\n+ except ImportError:\n+ pass\n+\n+ if not required_version:\n+ ver_error = (\"Numpy {} or above is required for PlasmaPy. The \"\n+ \"currently installed version is {}\"\n+ ).format(__minimum_numpy_version__, np_ver)\n+ raise ImportError(ver_error)\n+\n+\n+def _check_astropy_version(): # coveralls: ignore\n+ \"\"\" Make sure astropy in installed and meets the minimum version requirements\n+ \"\"\"\n+ required_version = False\n+ ap_ver = None\n+\n+ try:\n+ from astropy import __version__ as ap_ver\n+ required_version = _min_required_version(__minimum_astropy_version__,\n+ ap_ver)\n+ except ImportError:\n+ pass\n+\n+ if not required_version:\n+ ver_error = (\"Astropy {} or above is required for PlasmaPy. The \"\n+ \"currently installed version is {}\"\n+ ).format(__minimum_astropy_version__, ap_ver)\n+ raise ImportError(ver_error)\n+\n+\n+if (sys.version_info < _split_version(__minimum_python_version__)): # coveralls: ignore\n warnings.warn(\"PlasmaPy does not support Python 3.5 and below\")\n+\n+_check_numpy_version()\n+_check_astropy_version()\n+\n+try:\n+ from .classes import Plasma\n+ from . import classes\n+ from . import constants\n+ from . import atomic\n+ from . import math\n+ from . import physics\n+ from . import utils\n+except Exception:\n+ raise ImportError(\"Unable to load PlasmaPy subpackages.\")\n", "issue": "Raise ImportError when installing/importing with old versions of Python, Astropy, & NumPy\nWe decided that PlasmaPy will only be supported for:\r\n- Python version > 3.6\r\n- Astropy version > 2.0\r\n- NumPy version > 1.13\r\n\r\nHowever, when I try to run:\r\n```ShellSession\r\npython setup.py install\r\n```\r\nfrom the command line with Python 3.5 then I get a `SyntaxError` for syntax that is new in version 3.6.\r\n\r\nWhen I try to run\r\n```Python\r\nimport plasmapy\r\n```\r\nin Python 3.6 with Astropy 1.3.1, then I get an exception since one of the constants imported from Astropy was renamed in 2.0.\r\n\r\nWe should raise an appropriate exception (probably an `ImportError`) when we try to install or import PlasmaPy with any of the unsupported versions above. 
We should also have appropriate and useful error messages for any of these situations. The pull request to close this issue would involve editing `setup.py`, `requirements/base.txt`, and `plasmapy/__init__.py`.\r\n\r\nThank you!\r\nNick\n", "before_files": [{"content": "from ._metadata import (\n name as __name__,\n version as __version__,\n description as __doc__,\n author as __author__,\n)\n\nfrom .classes import Plasma\nfrom . import classes\nfrom . import constants\nfrom . import atomic\nfrom . import math\nfrom . import physics\nfrom . import utils\n\nimport sys\nimport warnings\n\nif sys.version_info[:2] < (3, 6): # coveralls: ignore\n warnings.warn(\"PlasmaPy does not support Python 3.5 and below\")\n", "path": "plasmapy/__init__.py"}]} | 935 | 720 |
gh_patches_debug_36409 | rasdani/github-patches | git_diff | streamlit__streamlit-682 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Streamlit 0.49.0 on Windows + Python 3.8 fails to execute (Tornado error)
# Summary
Streamlit fails to execute on Windows under Python 3.8 due to a bug in Tornado. The version of Tornado pinned in streamlit 0.49 was 5.x, while the latest version of Tornado at the time of this bug was 6.0.3. [A similar bug was found in IPython notebook.](https://stackoverflow.com/questions/58422817/jupyter-notebook-with-python-3-8-notimplementederror)
# Steps to reproduce
What are the steps we should take to reproduce the bug:
1. Setup Python 3.8 virtualenv on Windows
2. Install streamlit 0.49.0
3. streamlit hello
## Expected behavior:
Streamlit hello should run.
## Actual behavior:
Streamlit fails to execute, spitting out the following (tail end of traceback -- [see full traceback here](https://discuss.streamlit.io/t/streamlit-issue-when-calling-streamlit/724)):
```
    self._handlers[sock.fileno()] = add_accept_handler(
  File "c:\users\admin\appdata\local\programs\python\python38-32\lib\site-packages\tornado\netutil.py", line 268, in add_accept_handler
    io_loop.add_handler(sock, accept_handler, IOLoop.READ)
  File "c:\users\admin\appdata\local\programs\python\python38-32\lib\site-packages\tornado\platform\asyncio.py", line 79, in add_handler
    self.asyncio_loop.add_reader(
  File "c:\users\admin\appdata\local\programs\python\python38-32\lib\asyncio\events.py", line 501, in add_reader
    raise NotImplementedError
NotImplementedError
```
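
The `NotImplementedError` originates in asyncio rather than Tornado: starting with Python 3.8 the default Windows event loop is proactor-based, and (at least on the Python 3.8 builds shown in the traceback) it does not implement `add_reader()`, which Tornado's `IOLoop.add_handler()` calls. A minimal sketch of the failure mode — illustrative only, assumes Windows and Python 3.8:

```python
import asyncio
import socket
import sys

# Sketch: on Windows + Python 3.8 the default loop is proactor-based and
# add_reader() falls through to the abstract base class, which raises
# NotImplementedError -- the same frame shown in the traceback above.
if sys.platform == "win32" and sys.version_info >= (3, 8):
    loop = asyncio.new_event_loop()  # ProactorEventLoop under the default policy
    with socket.socket() as sock:
        try:
            loop.add_reader(sock.fileno(), lambda: None)
        except NotImplementedError:
            print("default Windows event loop has no add_reader()")
    loop.close()
```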
## Is this a regression?
No; Python 3.8 hasn't been officially supported in Streamlit to date. (But it _should_ work.)
# Debug info
- Streamlit version: 0.49.0
- Python version: 3.8
- Using Conda? PipEnv? PyEnv? Pex? any
- OS version: Windows (probably any)
- Browser version: n/a
# Additional information
Using Python 3.7.5 is the recommended solution for now. See https://discuss.streamlit.io/t/streamlit-issue-when-calling-streamlit/724/4
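
Besides downgrading Python, another workaround is to switch asyncio back to the selector-based loop on Windows before Tornado creates its IOLoop — this is essentially what the accompanying patch below does in `bootstrap.py`. A hedged sketch (the exact placement inside Streamlit's startup path is an assumption):

```python
import asyncio
import sys

# Sketch: restore the pre-3.8 selector event loop on Windows so Tornado's
# add_handler()/add_reader() calls work again. Must run before the first
# event loop / IOLoop is created.
if sys.platform == "win32" and sys.version_info >= (3, 8):
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
```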
</issue>
<code>
[start of lib/streamlit/bootstrap.py]
1 # -*- coding: utf-8 -*-
2 # Copyright 2018-2019 Streamlit Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import os
17 import signal
18 import sys
19
20 import click
21 import tornado.ioloop
22
23 from streamlit import config
24 from streamlit import net_util
25 from streamlit import url_util
26 from streamlit import util
27 from streamlit.Report import Report
28 from streamlit.logger import get_logger
29 from streamlit.server.Server import Server
30
31 LOGGER = get_logger(__name__)
32
33
34 # Wait for 1 second before opening a browser. This gives old tabs a chance to
35 # reconnect.
36 # This must be >= 2 * WebSocketConnection.ts#RECONNECT_WAIT_TIME_MS.
37 BROWSER_WAIT_TIMEOUT_SEC = 1
38
39
40 def _set_up_signal_handler():
41 LOGGER.debug("Setting up signal handler")
42
43 def signal_handler(signal_number, stack_frame):
44 # The server will shut down its threads and stop the ioloop
45 Server.get_current().stop()
46
47 signal.signal(signal.SIGTERM, signal_handler)
48 signal.signal(signal.SIGINT, signal_handler)
49 if sys.platform == "win32":
50 signal.signal(signal.SIGBREAK, signal_handler)
51 else:
52 signal.signal(signal.SIGQUIT, signal_handler)
53
54
55 def _fix_sys_path(script_path):
56 """Add the script's folder to the sys path.
57
58 Python normally does this automatically, but since we exec the script
59 ourselves we need to do it instead.
60 """
61 sys.path.insert(0, os.path.dirname(script_path))
62
63
64 def _fix_matplotlib_crash():
65 """Set Matplotlib backend to avoid a crash.
66
67 The default Matplotlib backend crashes Python on OSX when run on a thread
68 that's not the main thread, so here we set a safer backend as a fix.
69 Users can always disable this behavior by setting the config
70 runner.fixMatplotlib = false.
71
72 This fix is OS-independent. We didn't see a good reason to make this
73 Mac-only. Consistency within Streamlit seemed more important.
74 """
75 if config.get_option("runner.fixMatplotlib"):
76 try:
77 # TODO: a better option may be to set
78 # os.environ["MPLBACKEND"] = "Agg". We'd need to do this towards
79 # the top of __init__.py, before importing anything that imports
80 # pandas (which imports matplotlib). Alternately, we could set
81 # this environment variable in a new entrypoint defined in
82 # setup.py. Both of these introduce additional trickiness: they
83 # need to run without consulting streamlit.config.get_option,
84 # because this would import streamlit, and therefore matplotlib.
85 import matplotlib
86
87 matplotlib.use("Agg")
88 except ImportError:
89 pass
90
91
92 def _fix_sys_argv(script_path, args):
93 """sys.argv needs to exclude streamlit arguments and parameters
94 and be set to what a user's script may expect.
95 """
96 import sys
97
98 sys.argv = [script_path] + list(args)
99
100
101 def _on_server_start(server):
102 _print_url()
103
104 def maybe_open_browser():
105 if config.get_option("server.headless"):
106 # Don't open browser when in headless mode.
107 return
108
109 if server.browser_is_connected:
110 # Don't auto-open browser if there's already a browser connected.
111 # This can happen if there's an old tab repeatedly trying to
112 # connect, and it happens to success before we launch the browser.
113 return
114
115 if config.is_manually_set("browser.serverAddress"):
116 addr = config.get_option("browser.serverAddress")
117 else:
118 addr = "localhost"
119
120 util.open_browser(Report.get_url(addr))
121
122 # Schedule the browser to open using the IO Loop on the main thread, but
123 # only if no other browser connects within 1s.
124 ioloop = tornado.ioloop.IOLoop.current()
125 ioloop.call_later(BROWSER_WAIT_TIMEOUT_SEC, maybe_open_browser)
126
127
128 def _print_url():
129 title_message = "You can now view your Streamlit app in your browser."
130 named_urls = []
131
132 if config.is_manually_set("browser.serverAddress"):
133 named_urls = [
134 ("URL", Report.get_url(config.get_option("browser.serverAddress")))
135 ]
136
137 elif config.get_option("server.headless"):
138 named_urls = [
139 ("Network URL", Report.get_url(net_util.get_internal_ip())),
140 ("External URL", Report.get_url(net_util.get_external_ip())),
141 ]
142
143 else:
144 named_urls = [
145 ("Local URL", Report.get_url("localhost")),
146 ("Network URL", Report.get_url(net_util.get_internal_ip())),
147 ]
148
149 click.secho("")
150 click.secho(" %s" % title_message, fg="blue", bold=True)
151 click.secho("")
152
153 for url_name, url in named_urls:
154 url_util.print_url(url_name, url)
155
156 click.secho("")
157
158
159 def run(script_path, command_line, args):
160 """Run a script in a separate thread and start a server for the app.
161
162 This starts a blocking ioloop.
163
164 Parameters
165 ----------
166 script_path : str
167 command_line : str
168 args : [str]
169
170 """
171 _fix_sys_path(script_path)
172 _fix_matplotlib_crash()
173 _fix_sys_argv(script_path, args)
174
175 # Install a signal handler that will shut down the ioloop
176 # and close all our threads
177 _set_up_signal_handler()
178
179 ioloop = tornado.ioloop.IOLoop.current()
180
181 # Create and start the server.
182 server = Server(ioloop, script_path, command_line)
183 server.start(_on_server_start)
184
185 # (Must com after start(), because this starts a new thread and start() may
186 # call sys.exit() which doesn't kill other threads.
187 server.add_preheated_report_session()
188
189 # Start the ioloop. This function will not return until the
190 # server is shut down.
191 ioloop.start()
192
[end of lib/streamlit/bootstrap.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/streamlit/bootstrap.py b/lib/streamlit/bootstrap.py
--- a/lib/streamlit/bootstrap.py
+++ b/lib/streamlit/bootstrap.py
@@ -23,6 +23,7 @@
from streamlit import config
from streamlit import net_util
from streamlit import url_util
+from streamlit import env_util
from streamlit import util
from streamlit.Report import Report
from streamlit.logger import get_logger
@@ -30,7 +31,6 @@
LOGGER = get_logger(__name__)
-
# Wait for 1 second before opening a browser. This gives old tabs a chance to
# reconnect.
# This must be >= 2 * WebSocketConnection.ts#RECONNECT_WAIT_TIME_MS.
@@ -89,6 +89,43 @@
pass
+def _fix_tornado_crash():
+ """Set default asyncio policy to be compatible with Tornado 6.
+
+ Tornado 6 (at least) is not compatible with the default
+ asyncio implementation on Windows. So here we
+ pick the older SelectorEventLoopPolicy when the OS is Windows
+ if the known-incompatible default policy is in use.
+
+ This has to happen as early as possible to make it a low priority and
+ overrideable
+
+ See: https://github.com/tornadoweb/tornado/issues/2608
+
+ FIXME: if/when tornado supports the defaults in asyncio,
+ remove and bump tornado requirement for py38
+ """
+ if env_util.IS_WINDOWS and sys.version_info >= (3, 8):
+ import asyncio
+ try:
+ from asyncio import (
+ WindowsProactorEventLoopPolicy,
+ WindowsSelectorEventLoopPolicy,
+ )
+ except ImportError:
+ pass
+ # Not affected
+ else:
+ if (
+ type(asyncio.get_event_loop_policy()) is
+ WindowsProactorEventLoopPolicy
+ ):
+ # WindowsProactorEventLoopPolicy is not compatible with
+ # Tornado 6 fallback to the pre-3.8 default of Selector
+ asyncio.set_event_loop_policy(
+ WindowsSelectorEventLoopPolicy())
+
+
def _fix_sys_argv(script_path, args):
"""sys.argv needs to exclude streamlit arguments and parameters
and be set to what a user's script may expect.
@@ -170,6 +207,7 @@
"""
_fix_sys_path(script_path)
_fix_matplotlib_crash()
+ _fix_tornado_crash()
_fix_sys_argv(script_path, args)
# Install a signal handler that will shut down the ioloop
| {"golden_diff": "diff --git a/lib/streamlit/bootstrap.py b/lib/streamlit/bootstrap.py\n--- a/lib/streamlit/bootstrap.py\n+++ b/lib/streamlit/bootstrap.py\n@@ -23,6 +23,7 @@\n from streamlit import config\n from streamlit import net_util\n from streamlit import url_util\n+from streamlit import env_util\n from streamlit import util\n from streamlit.Report import Report\n from streamlit.logger import get_logger\n@@ -30,7 +31,6 @@\n \n LOGGER = get_logger(__name__)\n \n-\n # Wait for 1 second before opening a browser. This gives old tabs a chance to\n # reconnect.\n # This must be >= 2 * WebSocketConnection.ts#RECONNECT_WAIT_TIME_MS.\n@@ -89,6 +89,43 @@\n pass\n \n \n+def _fix_tornado_crash():\n+ \"\"\"Set default asyncio policy to be compatible with Tornado 6.\n+\n+ Tornado 6 (at least) is not compatible with the default\n+ asyncio implementation on Windows. So here we\n+ pick the older SelectorEventLoopPolicy when the OS is Windows\n+ if the known-incompatible default policy is in use.\n+\n+ This has to happen as early as possible to make it a low priority and\n+ overrideable\n+\n+ See: https://github.com/tornadoweb/tornado/issues/2608\n+\n+ FIXME: if/when tornado supports the defaults in asyncio,\n+ remove and bump tornado requirement for py38\n+ \"\"\"\n+ if env_util.IS_WINDOWS and sys.version_info >= (3, 8):\n+ import asyncio\n+ try:\n+ from asyncio import (\n+ WindowsProactorEventLoopPolicy,\n+ WindowsSelectorEventLoopPolicy,\n+ )\n+ except ImportError:\n+ pass\n+ # Not affected\n+ else:\n+ if (\n+ type(asyncio.get_event_loop_policy()) is\n+ WindowsProactorEventLoopPolicy\n+ ):\n+ # WindowsProactorEventLoopPolicy is not compatible with\n+ # Tornado 6 fallback to the pre-3.8 default of Selector\n+ asyncio.set_event_loop_policy(\n+ WindowsSelectorEventLoopPolicy())\n+\n+\n def _fix_sys_argv(script_path, args):\n \"\"\"sys.argv needs to exclude streamlit arguments and parameters\n and be set to what a user's script may expect.\n@@ -170,6 +207,7 @@\n \"\"\"\n _fix_sys_path(script_path)\n _fix_matplotlib_crash()\n+ _fix_tornado_crash()\n _fix_sys_argv(script_path, args)\n \n # Install a signal handler that will shut down the ioloop\n", "issue": "Streamlit 0.49.0 on Windows + Python 3.8 fails to execute (Tornado error)\n# Summary\r\nStreamlit fails to execute on Windows under Python 3.8 due to a bug in Tornado. The version of Tornado pinned in streamlit 0.49 was 5.x, while the latest version of Tornado at the time of this bug was 6.0.3. [A similar bug was found in IPython notebook.](https://stackoverflow.com/questions/58422817/jupyter-notebook-with-python-3-8-notimplementederror)\r\n\r\n# Steps to reproduce\r\nWhat are the steps we should take to reproduce the bug:\r\n1. Setup Python 3.8 virtualenv on Windows\r\n2. Install streamlit 0.49.0\r\n3. 
streamlit hello\r\n\r\n## Expected behavior:\r\nStreamlit hello should run.\r\n\r\n## Actual behavior:\r\nStreamlit fails to execute, spitting out the following (tail end of traceback -- [see full traceback here](https://discuss.streamlit.io/t/streamlit-issue-when-calling-streamlit/724)):\r\n\r\n```\r\nself._handlers[sock.fileno()] = add_accept_handler(\r\nFile \u201cc:\\users\\admin\\appdata\\local\\programs\\python\\python38-32\\lib\\site-packages\\tornado\\netutil.py\u201d, line 268, in add_accept_handler\r\nio_loop.add_handler(sock, accept_handler, IOLoop.READ)\r\nFile \u201cc:\\users\\admin\\appdata\\local\\programs\\python\\python38-32\\lib\\site-packages\\tornado\\platform\\asyncio.py\u201d, line 79, in add_handler\r\nself.asyncio_loop.add_reader(\r\nFile \u201cc:\\users\\admin\\appdata\\local\\programs\\python\\python38-32\\lib\\asyncio\\events.py\u201d, line 501, in add_reader\r\nraise NotImplementedError\r\nNotImplementedError\r\n```\r\n\r\n## Is this a regression?\r\n\r\nNo; Python 3.8 hasn't been officially supported in Streamlit to date. (But it _should_ work.)\r\n\r\n# Debug info\r\n- Streamlit version: 0.49.0\r\n- Python version: 3.8\r\n- Using Conda? PipEnv? PyEnv? Pex? any\r\n- OS version: Windows (probably any)\r\n- Browser version: n/a\r\n\r\n# Additional information\r\n\r\nUsing Python 3.7.5 is the recommended solution for now. See https://discuss.streamlit.io/t/streamlit-issue-when-calling-streamlit/724/4\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2018-2019 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport signal\nimport sys\n\nimport click\nimport tornado.ioloop\n\nfrom streamlit import config\nfrom streamlit import net_util\nfrom streamlit import url_util\nfrom streamlit import util\nfrom streamlit.Report import Report\nfrom streamlit.logger import get_logger\nfrom streamlit.server.Server import Server\n\nLOGGER = get_logger(__name__)\n\n\n# Wait for 1 second before opening a browser. 
This gives old tabs a chance to\n# reconnect.\n# This must be >= 2 * WebSocketConnection.ts#RECONNECT_WAIT_TIME_MS.\nBROWSER_WAIT_TIMEOUT_SEC = 1\n\n\ndef _set_up_signal_handler():\n LOGGER.debug(\"Setting up signal handler\")\n\n def signal_handler(signal_number, stack_frame):\n # The server will shut down its threads and stop the ioloop\n Server.get_current().stop()\n\n signal.signal(signal.SIGTERM, signal_handler)\n signal.signal(signal.SIGINT, signal_handler)\n if sys.platform == \"win32\":\n signal.signal(signal.SIGBREAK, signal_handler)\n else:\n signal.signal(signal.SIGQUIT, signal_handler)\n\n\ndef _fix_sys_path(script_path):\n \"\"\"Add the script's folder to the sys path.\n\n Python normally does this automatically, but since we exec the script\n ourselves we need to do it instead.\n \"\"\"\n sys.path.insert(0, os.path.dirname(script_path))\n\n\ndef _fix_matplotlib_crash():\n \"\"\"Set Matplotlib backend to avoid a crash.\n\n The default Matplotlib backend crashes Python on OSX when run on a thread\n that's not the main thread, so here we set a safer backend as a fix.\n Users can always disable this behavior by setting the config\n runner.fixMatplotlib = false.\n\n This fix is OS-independent. We didn't see a good reason to make this\n Mac-only. Consistency within Streamlit seemed more important.\n \"\"\"\n if config.get_option(\"runner.fixMatplotlib\"):\n try:\n # TODO: a better option may be to set\n # os.environ[\"MPLBACKEND\"] = \"Agg\". We'd need to do this towards\n # the top of __init__.py, before importing anything that imports\n # pandas (which imports matplotlib). Alternately, we could set\n # this environment variable in a new entrypoint defined in\n # setup.py. Both of these introduce additional trickiness: they\n # need to run without consulting streamlit.config.get_option,\n # because this would import streamlit, and therefore matplotlib.\n import matplotlib\n\n matplotlib.use(\"Agg\")\n except ImportError:\n pass\n\n\ndef _fix_sys_argv(script_path, args):\n \"\"\"sys.argv needs to exclude streamlit arguments and parameters\n and be set to what a user's script may expect.\n \"\"\"\n import sys\n\n sys.argv = [script_path] + list(args)\n\n\ndef _on_server_start(server):\n _print_url()\n\n def maybe_open_browser():\n if config.get_option(\"server.headless\"):\n # Don't open browser when in headless mode.\n return\n\n if server.browser_is_connected:\n # Don't auto-open browser if there's already a browser connected.\n # This can happen if there's an old tab repeatedly trying to\n # connect, and it happens to success before we launch the browser.\n return\n\n if config.is_manually_set(\"browser.serverAddress\"):\n addr = config.get_option(\"browser.serverAddress\")\n else:\n addr = \"localhost\"\n\n util.open_browser(Report.get_url(addr))\n\n # Schedule the browser to open using the IO Loop on the main thread, but\n # only if no other browser connects within 1s.\n ioloop = tornado.ioloop.IOLoop.current()\n ioloop.call_later(BROWSER_WAIT_TIMEOUT_SEC, maybe_open_browser)\n\n\ndef _print_url():\n title_message = \"You can now view your Streamlit app in your browser.\"\n named_urls = []\n\n if config.is_manually_set(\"browser.serverAddress\"):\n named_urls = [\n (\"URL\", Report.get_url(config.get_option(\"browser.serverAddress\")))\n ]\n\n elif config.get_option(\"server.headless\"):\n named_urls = [\n (\"Network URL\", Report.get_url(net_util.get_internal_ip())),\n (\"External URL\", Report.get_url(net_util.get_external_ip())),\n ]\n\n else:\n named_urls = [\n (\"Local 
URL\", Report.get_url(\"localhost\")),\n (\"Network URL\", Report.get_url(net_util.get_internal_ip())),\n ]\n\n click.secho(\"\")\n click.secho(\" %s\" % title_message, fg=\"blue\", bold=True)\n click.secho(\"\")\n\n for url_name, url in named_urls:\n url_util.print_url(url_name, url)\n\n click.secho(\"\")\n\n\ndef run(script_path, command_line, args):\n \"\"\"Run a script in a separate thread and start a server for the app.\n\n This starts a blocking ioloop.\n\n Parameters\n ----------\n script_path : str\n command_line : str\n args : [str]\n\n \"\"\"\n _fix_sys_path(script_path)\n _fix_matplotlib_crash()\n _fix_sys_argv(script_path, args)\n\n # Install a signal handler that will shut down the ioloop\n # and close all our threads\n _set_up_signal_handler()\n\n ioloop = tornado.ioloop.IOLoop.current()\n\n # Create and start the server.\n server = Server(ioloop, script_path, command_line)\n server.start(_on_server_start)\n\n # (Must com after start(), because this starts a new thread and start() may\n # call sys.exit() which doesn't kill other threads.\n server.add_preheated_report_session()\n\n # Start the ioloop. This function will not return until the\n # server is shut down.\n ioloop.start()\n", "path": "lib/streamlit/bootstrap.py"}]} | 2,982 | 584 |
gh_patches_debug_60412 | rasdani/github-patches | git_diff | coala__coala-4215 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support neovim (`nvim`) as an editor
```
[WARNING][14:15:54] The editor "nvim" is unknown to coala. Files won't be opened at the correct positions and other quirks might occur. Consider opening an issue at https://github.com/coala/coala/issues so we can add support for this editor. Supported editors are: atom, emacs, emacsclient, geany, gedit, gvim, kate, nano, subl, vim, xed
```
It's basically the same as `vim`, so it could just be added to the allowed editors list and given the same behavior as `vim`.
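
For illustration, the addition can simply mirror the existing `vim` entry in the `KNOWN_EDITORS` dict of `OpenEditorAction.py` shown below (and this is what the accompanying patch ends up adding):

```python
# Sketch: an "nvim" entry copied from the "vim" one in KNOWN_EDITORS
# (see OpenEditorAction.py below); nvim accepts the same "+<line>" syntax.
KNOWN_EDITORS = {
    # ...existing editors...
    'nvim': {
        'file_arg_template': '{filename} +{line}',
        'gui': False,
    },
}
```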
</issue>
<code>
[start of coalib/results/result_actions/OpenEditorAction.py]
1 import logging
2 import shlex
3 import subprocess
4 from os.path import exists
5 from os import environ
6
7 from coalib.results.Diff import Diff
8 from coalib.results.Result import Result
9 from coalib.results.result_actions.ResultAction import ResultAction
10 from coala_utils.decorators import enforce_signature
11
12
13 """
14 Data about all text editors coala knows about. New editors
15 can just be added here.
16 For each editor the following info is stored:
17 {
18 <name/comand>: {
19 "file_arg_template":
20 A string used to generate arguments to open a file.
21 Must at least have the placeholder 'filename'
22 and can optionally use 'line' and 'column'
23 to open the file at the correct position.
24 Some editors don't support opening files at
25 a certain position if multiple files are
26 to be opened, but we try to do so anyway.
27 "args":
28 General arguments added to the call, e.g. to
29 force opening of a new window.
30 "gui":
31 Boolean. True if this is a gui editor.
32 Optional, defaults to False.
33 }
34 }
35 """
36 KNOWN_EDITORS = {
37 # non-gui editors
38 'vim': {
39 'file_arg_template': '{filename} +{line}',
40 'gui': False
41 },
42 'nano': {
43 'file_arg_template': '+{line},{column} {filename} ',
44 'gui': False
45 },
46 'emacs': {
47 'file_arg_template': '+{line}:{column} {filename}',
48 'gui': False
49 },
50 'emacsclient': {
51 'file_arg_template': '+{line}:{column} {filename}',
52 'gui': False
53 },
54
55 # gui editors
56 'atom': {
57 'file_arg_template': '{filename}:{line}:{column}',
58 'args': '--wait',
59 'gui': True
60 },
61 'geany': {
62 'file_arg_template': '{filename} -l {line} --column {column}',
63 'args': '-s -i',
64 'gui': True
65 },
66 'gedit': {
67 'file_arg_template': '{filename} +{line}',
68 'args': '-s',
69 'gui': True
70 },
71 'gvim': {
72 'file_arg_template': '{filename} +{line}',
73 'gui': True
74 },
75 'kate': {
76 'file_arg_template': '{filename} -l {line} -c {column}',
77 'args': '--new',
78 'gui': True
79 },
80 'subl': {
81 'file_arg_template': '{filename}:{line}:{column}',
82 'args': '--wait',
83 'gui': True
84 },
85 'xed': {
86 'file_arg_template': '{filename} +{line}',
87 'args': '--new-window',
88 'gui': True
89 },
90 }
91
92
93 class OpenEditorAction(ResultAction):
94
95 SUCCESS_MESSAGE = 'Changes saved successfully.'
96
97 @staticmethod
98 @enforce_signature
99 def is_applicable(result: Result, original_file_dict, file_diff_dict):
100 """
101 For being applicable, the result has to point to a number of files
102 that have to exist i.e. have not been previously deleted.
103 """
104
105 if not len(result.affected_code) > 0:
106 return 'The result is not associated with any source code.'
107
108 filenames = set(src.renamed_file(file_diff_dict)
109 for src in result.affected_code)
110 if not all(exists(filename) for filename in filenames):
111 return ("The result is associated with source code that doesn't "
112 'seem to exist.')
113 return True
114
115 def build_editor_call_args(self, editor, editor_info, filenames):
116 """
117 Create argument list which will then be used to open an editor for
118 the given files at the correct positions, if applicable.
119
120 :param editor:
121 The editor to open the file with.
122 :param editor_info:
123 A dict containing the keys ``args`` and ``file_arg_template``,
124 providing additional call arguments and a template to open
125 files at a position for this editor.
126 :param filenames:
127 A dict holding one entry for each file to be opened.
128 Keys must be ``filename``, ``line`` and ``column``.
129 """
130 call_args = [editor]
131
132 # for some editors we define extra arguments
133 if 'args' in editor_info:
134 call_args += shlex.split(editor_info['args'])
135
136 # add info for each file to be opened
137 for file_info in filenames.values():
138 file_arg = editor_info['file_arg_template'].format(
139 filename=shlex.quote(file_info['filename']),
140 line=file_info['line'], column=file_info['column']
141 )
142 call_args += shlex.split(file_arg)
143
144 return call_args
145
146 def apply(self, result, original_file_dict, file_diff_dict, editor: str):
147 """
148 Open file(s)
149
150 :param editor: The editor to open the file with.
151 """
152 try:
153 editor_info = KNOWN_EDITORS[editor.strip()]
154 except KeyError:
155 # If the editor is unknown fall back to just passing
156 # the filenames and emit a warning
157 logging.warning(
158 'The editor "{editor}" is unknown to coala. Files won\'t be'
159 ' opened at the correct positions and other quirks might'
160 ' occur. Consider opening an issue at'
161 ' https://github.com/coala/coala/issues so we'
162 ' can add support for this editor.'
163 ' Supported editors are: {supported}'.format(
164 editor=editor, supported=', '.join(
165 sorted(KNOWN_EDITORS.keys())
166 )
167 )
168 )
169 editor_info = {
170 'file_arg_template': '{filename}',
171 'gui': False
172 }
173
174 # Use dict to remove duplicates
175 filenames = {
176 src.file: {
177 'filename': src.renamed_file(file_diff_dict),
178 'line': src.start.line or 1,
179 'column': src.start.column or 1
180 }
181 for src in result.affected_code
182 }
183
184 call_args = self.build_editor_call_args(editor, editor_info, filenames)
185
186 if editor_info.get('gui', True):
187 subprocess.call(call_args, stdout=subprocess.PIPE)
188 else:
189 subprocess.call(call_args)
190
191 for original_name, file_info in filenames.items():
192 filename = file_info['filename']
193 with open(filename, encoding='utf-8') as file:
194 file_diff_dict[original_name] = Diff.from_string_arrays(
195 original_file_dict[original_name], file.readlines(),
196 rename=False if original_name == filename else filename)
197
198 return file_diff_dict
199
200 if 'EDITOR' in environ:
201 apply.__defaults__ = (environ['EDITOR'],)
202
[end of coalib/results/result_actions/OpenEditorAction.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/coalib/results/result_actions/OpenEditorAction.py b/coalib/results/result_actions/OpenEditorAction.py
--- a/coalib/results/result_actions/OpenEditorAction.py
+++ b/coalib/results/result_actions/OpenEditorAction.py
@@ -39,6 +39,10 @@
'file_arg_template': '{filename} +{line}',
'gui': False
},
+ 'nvim': {
+ 'file_arg_template': '{filename} +{line}',
+ 'gui': False
+ },
'nano': {
'file_arg_template': '+{line},{column} {filename} ',
'gui': False
| {"golden_diff": "diff --git a/coalib/results/result_actions/OpenEditorAction.py b/coalib/results/result_actions/OpenEditorAction.py\n--- a/coalib/results/result_actions/OpenEditorAction.py\n+++ b/coalib/results/result_actions/OpenEditorAction.py\n@@ -39,6 +39,10 @@\n 'file_arg_template': '{filename} +{line}',\n 'gui': False\n },\n+ 'nvim': {\n+ 'file_arg_template': '{filename} +{line}',\n+ 'gui': False\n+ },\n 'nano': {\n 'file_arg_template': '+{line},{column} {filename} ',\n 'gui': False\n", "issue": "Support neovim (`nvim`) as an editor\n```\r\n[WARNING][14:15:54] The editor \"nvim\" is unknown to coala. Files won't be opened at the correct positions and other quirks might occur. Consider opening an issue at https://github.com/coala/coala/issues so we can add support for this editor. Supported editors are: atom, emacs, emacsclient, geany, gedit, gvim, kate, nano, subl, vim, xed\r\n```\r\n\r\nIt's basically the same as `vim` so it could be just added to the allowed editors list and given the same behavior as `vim`.\n", "before_files": [{"content": "import logging\nimport shlex\nimport subprocess\nfrom os.path import exists\nfrom os import environ\n\nfrom coalib.results.Diff import Diff\nfrom coalib.results.Result import Result\nfrom coalib.results.result_actions.ResultAction import ResultAction\nfrom coala_utils.decorators import enforce_signature\n\n\n\"\"\"\nData about all text editors coala knows about. New editors\ncan just be added here.\nFor each editor the following info is stored:\n{\n <name/comand>: {\n \"file_arg_template\":\n A string used to generate arguments to open a file.\n Must at least have the placeholder 'filename'\n and can optionally use 'line' and 'column'\n to open the file at the correct position.\n Some editors don't support opening files at\n a certain position if multiple files are\n to be opened, but we try to do so anyway.\n \"args\":\n General arguments added to the call, e.g. to\n force opening of a new window.\n \"gui\":\n Boolean. True if this is a gui editor.\n Optional, defaults to False.\n }\n}\n\"\"\"\nKNOWN_EDITORS = {\n # non-gui editors\n 'vim': {\n 'file_arg_template': '{filename} +{line}',\n 'gui': False\n },\n 'nano': {\n 'file_arg_template': '+{line},{column} {filename} ',\n 'gui': False\n },\n 'emacs': {\n 'file_arg_template': '+{line}:{column} {filename}',\n 'gui': False\n },\n 'emacsclient': {\n 'file_arg_template': '+{line}:{column} {filename}',\n 'gui': False\n },\n\n # gui editors\n 'atom': {\n 'file_arg_template': '{filename}:{line}:{column}',\n 'args': '--wait',\n 'gui': True\n },\n 'geany': {\n 'file_arg_template': '{filename} -l {line} --column {column}',\n 'args': '-s -i',\n 'gui': True\n },\n 'gedit': {\n 'file_arg_template': '{filename} +{line}',\n 'args': '-s',\n 'gui': True\n },\n 'gvim': {\n 'file_arg_template': '{filename} +{line}',\n 'gui': True\n },\n 'kate': {\n 'file_arg_template': '{filename} -l {line} -c {column}',\n 'args': '--new',\n 'gui': True\n },\n 'subl': {\n 'file_arg_template': '{filename}:{line}:{column}',\n 'args': '--wait',\n 'gui': True\n },\n 'xed': {\n 'file_arg_template': '{filename} +{line}',\n 'args': '--new-window',\n 'gui': True\n },\n}\n\n\nclass OpenEditorAction(ResultAction):\n\n SUCCESS_MESSAGE = 'Changes saved successfully.'\n\n @staticmethod\n @enforce_signature\n def is_applicable(result: Result, original_file_dict, file_diff_dict):\n \"\"\"\n For being applicable, the result has to point to a number of files\n that have to exist i.e. 
have not been previously deleted.\n \"\"\"\n\n if not len(result.affected_code) > 0:\n return 'The result is not associated with any source code.'\n\n filenames = set(src.renamed_file(file_diff_dict)\n for src in result.affected_code)\n if not all(exists(filename) for filename in filenames):\n return (\"The result is associated with source code that doesn't \"\n 'seem to exist.')\n return True\n\n def build_editor_call_args(self, editor, editor_info, filenames):\n \"\"\"\n Create argument list which will then be used to open an editor for\n the given files at the correct positions, if applicable.\n\n :param editor:\n The editor to open the file with.\n :param editor_info:\n A dict containing the keys ``args`` and ``file_arg_template``,\n providing additional call arguments and a template to open\n files at a position for this editor.\n :param filenames:\n A dict holding one entry for each file to be opened.\n Keys must be ``filename``, ``line`` and ``column``.\n \"\"\"\n call_args = [editor]\n\n # for some editors we define extra arguments\n if 'args' in editor_info:\n call_args += shlex.split(editor_info['args'])\n\n # add info for each file to be opened\n for file_info in filenames.values():\n file_arg = editor_info['file_arg_template'].format(\n filename=shlex.quote(file_info['filename']),\n line=file_info['line'], column=file_info['column']\n )\n call_args += shlex.split(file_arg)\n\n return call_args\n\n def apply(self, result, original_file_dict, file_diff_dict, editor: str):\n \"\"\"\n Open file(s)\n\n :param editor: The editor to open the file with.\n \"\"\"\n try:\n editor_info = KNOWN_EDITORS[editor.strip()]\n except KeyError:\n # If the editor is unknown fall back to just passing\n # the filenames and emit a warning\n logging.warning(\n 'The editor \"{editor}\" is unknown to coala. Files won\\'t be'\n ' opened at the correct positions and other quirks might'\n ' occur. Consider opening an issue at'\n ' https://github.com/coala/coala/issues so we'\n ' can add support for this editor.'\n ' Supported editors are: {supported}'.format(\n editor=editor, supported=', '.join(\n sorted(KNOWN_EDITORS.keys())\n )\n )\n )\n editor_info = {\n 'file_arg_template': '{filename}',\n 'gui': False\n }\n\n # Use dict to remove duplicates\n filenames = {\n src.file: {\n 'filename': src.renamed_file(file_diff_dict),\n 'line': src.start.line or 1,\n 'column': src.start.column or 1\n }\n for src in result.affected_code\n }\n\n call_args = self.build_editor_call_args(editor, editor_info, filenames)\n\n if editor_info.get('gui', True):\n subprocess.call(call_args, stdout=subprocess.PIPE)\n else:\n subprocess.call(call_args)\n\n for original_name, file_info in filenames.items():\n filename = file_info['filename']\n with open(filename, encoding='utf-8') as file:\n file_diff_dict[original_name] = Diff.from_string_arrays(\n original_file_dict[original_name], file.readlines(),\n rename=False if original_name == filename else filename)\n\n return file_diff_dict\n\n if 'EDITOR' in environ:\n apply.__defaults__ = (environ['EDITOR'],)\n", "path": "coalib/results/result_actions/OpenEditorAction.py"}]} | 2,630 | 144 |
gh_patches_debug_15290 | rasdani/github-patches | git_diff | Zeroto521__my-data-toolkit-390 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DOC: shorten package path
</issue>
<code>
[start of dtoolkit/geoaccessor/register.py]
1 from pandas.util._decorators import doc
2
3 from dtoolkit.accessor.register import register_method_factory
4 from dtoolkit.geoaccessor.accessor import register_geodataframe_accessor
5 from dtoolkit.geoaccessor.accessor import register_geoseries_accessor
6
7
8 @register_method_factory
9 @doc(klass=":class:`geopandas.GeoSeries`")
10 def register_geoseries_method(method):
11 """
12 {klass} register accessor for human.
13
14 Write method normally, use method naturally.
15
16 See Also
17 --------
18 dtoolkit.geoaccessor.accessor.register_geoseries_accessor
19 dtoolkit.geoaccessor.accessor.register_geodataframe_accessor
20 register_geoseries_method
21 register_geodataframe_method
22
23 Examples
24 --------
25 In your library code::
26
27 import geopandas as gpd
28
29 from pygeos import count_coordinates, from_shapely
30
31 @register_geodataframe_method
32 @register_geoseries_method
33 def counts(s: gpd.GeoSeries):
34 # Counts the number of coordinate pairs in geometry
35
36 func = lambda x: count_coordinates(from_shapely(x))
37 return s.geometry.apply(func)
38
39 Back in an interactive IPython session:
40
41 .. code-block:: ipython
42
43 In [1]: import geopandas as gpd
44
45 In [2]: s = gpd.GeoSeries.from_wkt(["POINT (0 0)", "POINT (1 1)", None])
46
47 In [3]: s
48 Out[3]:
49 0 POINT (0.00000 0.00000)
50 1 POINT (1.00000 1.00000)
51 2 None
52 dtype: geometry
53
54 In [4]: s.counts()
55 Out[4]:
56 0 1
57 1 1
58 2 0
59 dtype: int64
60
61 In [5]: d = s.to_frame("geometry")
62 Out[5]:
63 geometry
64 0 POINT (0.00000 0.00000)
65 1 POINT (1.00000 1.00000)
66 2 None
67
68 In [6]: d.counts()
69 Out[6]:
70 0 1
71 1 1
72 2 0
73 Name: geometry, dtype: int64
74 """
75 return register_geoseries_accessor(method)
76
77
78 @register_method_factory
79 @doc(register_geoseries_method, klass=":class:`geopandas.GeoDataFrame`")
80 def register_geodataframe_method(method):
81 return register_geodataframe_accessor(method)
82
[end of dtoolkit/geoaccessor/register.py]
[start of dtoolkit/geoaccessor/accessor.py]
1 from geopandas import GeoDataFrame
2 from geopandas import GeoSeries
3 from pandas.core.accessor import _register_accessor
4 from pandas.util._decorators import doc
5
6
7 @doc(klass=":class:`geopandas.GeoSeries`")
8 def register_geoseries_accessor(name: str):
9 """
10 Register a custom accessor on {klass} objects.
11
12 This is a temparatory solution to hook method into
13 :class:`~geopandas.GeoSeries` or :class:`~geopandas.GeoDataFrame`.
14 If `geopandas#1952`_ done, it would be removed from
15 :mod:`dtoolkit.geoaccessor`.
16
17 .. _geopandas#1952: https://github.com/geopandas/geopandas/pull/1952
18
19 Parameters
20 ----------
21 name : str
22 Name under which the accessor should be registered. A warning is issued
23 if this name conflicts with a preexisting attribute.
24
25 Returns
26 -------
27 callable
28 A class decorator.
29
30 See Also
31 --------
32 register_geoseries_accessor
33 register_geodataframe_accessor
34 dtoolkit.geoaccessor.register.register_geoseries_method
35 dtoolkit.geoaccessor.register.register_geodataframe_method
36
37 Notes
38 -----
39 When accessed, your accessor will be initialized with the geopandas object
40 the user is interacting with. So the signature must be::
41
42 def __init__(self, geopandas_object): # noqa: E999
43 ...
44
45 For consistency with geopandas methods, you should raise an
46 ``AttributeError`` if the data passed to your accessor has an incorrect
47 dtype.
48
49 >>> import geopandas as gpd
50 >>> gpd.GeoSeries().dt
51 Traceback (most recent call last):
52 ...
53 AttributeError: Can only use .dt accessor with datetimelike values
54
55 Examples
56 --------
57 In your library code::
58
59 from pygeos import count_coordinates, from_shapely
60
61 @register_geodataframe_accessor("coords")
62 @register_geoseries_accessor("coords")
63 class CoordinateAccessor:
64 def __init__(self, gpd_obj):
65 self._obj = gpd_obj
66 self.geometry = gpd_obj.geometry
67
68 @property
69 def count_coordinates(self):
70 # Counts the number of coordinate pairs in geometry
71
72 return self.geometry.apply(count_coordinates(from_shapely(x)))
73
74 Back in an interactive IPython session:
75
76 .. code-block:: ipython
77
78 In [1]: import geopandas as gpd
79
80 In [2]: s = gpd.GeoSeries.from_wkt(["POINT (0 0)", "POINT (1 1)", None])
81
82 In [3]: s
83 Out[3]:
84 0 POINT (0.00000 0.00000)
85 1 POINT (1.00000 1.00000)
86 2 None
87 dtype: geometry
88
89 In [4]: s.coords.count_coordinates
90 Out[4]:
91 0 1
92 1 1
93 2 0
94 dtype: int64
95
96 In [5]: d = s.to_frame("geometry")
97 Out[5]:
98 geometry
99 0 POINT (0.00000 0.00000)
100 1 POINT (1.00000 1.00000)
101 2 None
102
103 In [6]: d.coords.count_coordinates
104 Out[6]:
105 0 1
106 1 1
107 2 0
108 Name: geometry, dtype: int64
109 """
110
111 return _register_accessor(name, GeoSeries)
112
113
114 @doc(register_geoseries_accessor, klass=":class:`geopandas.GeoDataFrame`")
115 def register_geodataframe_accessor(name: str):
116
117 return _register_accessor(name, GeoDataFrame)
118
[end of dtoolkit/geoaccessor/accessor.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dtoolkit/geoaccessor/accessor.py b/dtoolkit/geoaccessor/accessor.py
--- a/dtoolkit/geoaccessor/accessor.py
+++ b/dtoolkit/geoaccessor/accessor.py
@@ -31,8 +31,8 @@
--------
register_geoseries_accessor
register_geodataframe_accessor
- dtoolkit.geoaccessor.register.register_geoseries_method
- dtoolkit.geoaccessor.register.register_geodataframe_method
+ dtoolkit.geoaccessor.register_geoseries_method
+ dtoolkit.geoaccessor.register_geodataframe_method
Notes
-----
diff --git a/dtoolkit/geoaccessor/register.py b/dtoolkit/geoaccessor/register.py
--- a/dtoolkit/geoaccessor/register.py
+++ b/dtoolkit/geoaccessor/register.py
@@ -15,8 +15,8 @@
See Also
--------
- dtoolkit.geoaccessor.accessor.register_geoseries_accessor
- dtoolkit.geoaccessor.accessor.register_geodataframe_accessor
+ dtoolkit.geoaccessor.register_geoseries_accessor
+ dtoolkit.geoaccessor.register_geodataframe_accessor
register_geoseries_method
register_geodataframe_method
| {"golden_diff": "diff --git a/dtoolkit/geoaccessor/accessor.py b/dtoolkit/geoaccessor/accessor.py\n--- a/dtoolkit/geoaccessor/accessor.py\n+++ b/dtoolkit/geoaccessor/accessor.py\n@@ -31,8 +31,8 @@\n --------\n register_geoseries_accessor\n register_geodataframe_accessor\n- dtoolkit.geoaccessor.register.register_geoseries_method\n- dtoolkit.geoaccessor.register.register_geodataframe_method\n+ dtoolkit.geoaccessor.register_geoseries_method\n+ dtoolkit.geoaccessor.register_geodataframe_method\n \n Notes\n -----\ndiff --git a/dtoolkit/geoaccessor/register.py b/dtoolkit/geoaccessor/register.py\n--- a/dtoolkit/geoaccessor/register.py\n+++ b/dtoolkit/geoaccessor/register.py\n@@ -15,8 +15,8 @@\n \n See Also\n --------\n- dtoolkit.geoaccessor.accessor.register_geoseries_accessor\n- dtoolkit.geoaccessor.accessor.register_geodataframe_accessor\n+ dtoolkit.geoaccessor.register_geoseries_accessor\n+ dtoolkit.geoaccessor.register_geodataframe_accessor\n register_geoseries_method\n register_geodataframe_method\n", "issue": "DOC: shorten package path\n\n", "before_files": [{"content": "from pandas.util._decorators import doc\n\nfrom dtoolkit.accessor.register import register_method_factory\nfrom dtoolkit.geoaccessor.accessor import register_geodataframe_accessor\nfrom dtoolkit.geoaccessor.accessor import register_geoseries_accessor\n\n\n@register_method_factory\n@doc(klass=\":class:`geopandas.GeoSeries`\")\ndef register_geoseries_method(method):\n \"\"\"\n {klass} register accessor for human.\n\n Write method normally, use method naturally.\n\n See Also\n --------\n dtoolkit.geoaccessor.accessor.register_geoseries_accessor\n dtoolkit.geoaccessor.accessor.register_geodataframe_accessor\n register_geoseries_method\n register_geodataframe_method\n\n Examples\n --------\n In your library code::\n\n import geopandas as gpd\n\n from pygeos import count_coordinates, from_shapely\n\n @register_geodataframe_method\n @register_geoseries_method\n def counts(s: gpd.GeoSeries):\n # Counts the number of coordinate pairs in geometry\n\n func = lambda x: count_coordinates(from_shapely(x))\n return s.geometry.apply(func)\n\n Back in an interactive IPython session:\n\n .. code-block:: ipython\n\n In [1]: import geopandas as gpd\n\n In [2]: s = gpd.GeoSeries.from_wkt([\"POINT (0 0)\", \"POINT (1 1)\", None])\n\n In [3]: s\n Out[3]:\n 0 POINT (0.00000 0.00000)\n 1 POINT (1.00000 1.00000)\n 2 None\n dtype: geometry\n\n In [4]: s.counts()\n Out[4]:\n 0 1\n 1 1\n 2 0\n dtype: int64\n\n In [5]: d = s.to_frame(\"geometry\")\n Out[5]:\n geometry\n 0 POINT (0.00000 0.00000)\n 1 POINT (1.00000 1.00000)\n 2 None\n\n In [6]: d.counts()\n Out[6]:\n 0 1\n 1 1\n 2 0\n Name: geometry, dtype: int64\n \"\"\"\n return register_geoseries_accessor(method)\n\n\n@register_method_factory\n@doc(register_geoseries_method, klass=\":class:`geopandas.GeoDataFrame`\")\ndef register_geodataframe_method(method):\n return register_geodataframe_accessor(method)\n", "path": "dtoolkit/geoaccessor/register.py"}, {"content": "from geopandas import GeoDataFrame\nfrom geopandas import GeoSeries\nfrom pandas.core.accessor import _register_accessor\nfrom pandas.util._decorators import doc\n\n\n@doc(klass=\":class:`geopandas.GeoSeries`\")\ndef register_geoseries_accessor(name: str):\n \"\"\"\n Register a custom accessor on {klass} objects.\n\n This is a temparatory solution to hook method into\n :class:`~geopandas.GeoSeries` or :class:`~geopandas.GeoDataFrame`.\n If `geopandas#1952`_ done, it would be removed from\n :mod:`dtoolkit.geoaccessor`.\n\n .. 
_geopandas#1952: https://github.com/geopandas/geopandas/pull/1952\n\n Parameters\n ----------\n name : str\n Name under which the accessor should be registered. A warning is issued\n if this name conflicts with a preexisting attribute.\n\n Returns\n -------\n callable\n A class decorator.\n\n See Also\n --------\n register_geoseries_accessor\n register_geodataframe_accessor\n dtoolkit.geoaccessor.register.register_geoseries_method\n dtoolkit.geoaccessor.register.register_geodataframe_method\n\n Notes\n -----\n When accessed, your accessor will be initialized with the geopandas object\n the user is interacting with. So the signature must be::\n\n def __init__(self, geopandas_object): # noqa: E999\n ...\n\n For consistency with geopandas methods, you should raise an\n ``AttributeError`` if the data passed to your accessor has an incorrect\n dtype.\n\n >>> import geopandas as gpd\n >>> gpd.GeoSeries().dt\n Traceback (most recent call last):\n ...\n AttributeError: Can only use .dt accessor with datetimelike values\n\n Examples\n --------\n In your library code::\n\n from pygeos import count_coordinates, from_shapely\n\n @register_geodataframe_accessor(\"coords\")\n @register_geoseries_accessor(\"coords\")\n class CoordinateAccessor:\n def __init__(self, gpd_obj):\n self._obj = gpd_obj\n self.geometry = gpd_obj.geometry\n\n @property\n def count_coordinates(self):\n # Counts the number of coordinate pairs in geometry\n\n return self.geometry.apply(count_coordinates(from_shapely(x)))\n\n Back in an interactive IPython session:\n\n .. code-block:: ipython\n\n In [1]: import geopandas as gpd\n\n In [2]: s = gpd.GeoSeries.from_wkt([\"POINT (0 0)\", \"POINT (1 1)\", None])\n\n In [3]: s\n Out[3]:\n 0 POINT (0.00000 0.00000)\n 1 POINT (1.00000 1.00000)\n 2 None\n dtype: geometry\n\n In [4]: s.coords.count_coordinates\n Out[4]:\n 0 1\n 1 1\n 2 0\n dtype: int64\n\n In [5]: d = s.to_frame(\"geometry\")\n Out[5]:\n geometry\n 0 POINT (0.00000 0.00000)\n 1 POINT (1.00000 1.00000)\n 2 None\n\n In [6]: d.coords.count_coordinates\n Out[6]:\n 0 1\n 1 1\n 2 0\n Name: geometry, dtype: int64\n \"\"\"\n\n return _register_accessor(name, GeoSeries)\n\n\n@doc(register_geoseries_accessor, klass=\":class:`geopandas.GeoDataFrame`\")\ndef register_geodataframe_accessor(name: str):\n\n return _register_accessor(name, GeoDataFrame)\n", "path": "dtoolkit/geoaccessor/accessor.py"}]} | 2,468 | 284 |
gh_patches_debug_4229 | rasdani/github-patches | git_diff | twisted__twisted-11816 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
twisted.web.pages.errorPage docstring has a typo
> Use [IResource.putChild()](https://docs.twisted.org/en/stable/api/twisted.web.resource.IResource.html#putChild) override specific path.
Should be:
> Use [IResource.putChild()](https://docs.twisted.org/en/stable/api/twisted.web.resource.IResource.html#putChild) override **a** specific path.
</issue>
<code>
[start of src/twisted/web/pages.py]
1 # -*- test-case-name: twisted.web.test.test_pages -*-
2 # Copyright (c) Twisted Matrix Laboratories.
3 # See LICENSE for details.
4
5 """
6 Utility implementations of L{IResource}.
7 """
8
9 __all__ = (
10 "errorPage",
11 "notFound",
12 "forbidden",
13 )
14
15 from typing import cast
16
17 from twisted.web import http
18 from twisted.web.iweb import IRenderable, IRequest
19 from twisted.web.resource import IResource, Resource
20 from twisted.web.template import renderElement, tags
21
22
23 class _ErrorPage(Resource):
24 """
25 L{_ErrorPage} is a resource that responds to all requests with a particular
26 (parameterized) HTTP status code and an HTML body containing some
27 descriptive text. This is useful for rendering simple error pages.
28
29 @see: L{twisted.web.pages.errorPage}
30
31 @ivar _code: An integer HTTP status code which will be used for the
32 response.
33
34 @ivar _brief: A short string which will be included in the response body as
35 the page title.
36
37 @ivar _detail: A longer string which will be included in the response body.
38 """
39
40 def __init__(self, code: int, brief: str, detail: str) -> None:
41 super().__init__()
42 self._code: int = code
43 self._brief: str = brief
44 self._detail: str = detail
45
46 def render(self, request: IRequest) -> object:
47 """
48 Respond to all requests with the given HTTP status code and an HTML
49 document containing the explanatory strings.
50 """
51 request.setResponseCode(self._code)
52 request.setHeader(b"content-type", b"text/html; charset=utf-8")
53 return renderElement(
54 request,
55 # cast because the type annotations here seem off; Tag isn't an
56 # IRenderable but also probably should be? See
57 # https://github.com/twisted/twisted/issues/4982
58 cast(
59 IRenderable,
60 tags.html(
61 tags.head(tags.title(f"{self._code} - {self._brief}")),
62 tags.body(tags.h1(self._brief), tags.p(self._detail)),
63 ),
64 ),
65 )
66
67 def getChild(self, path: bytes, request: IRequest) -> Resource:
68 """
69 Handle all requests for which L{_ErrorPage} lacks a child by returning
70 this error page.
71
72 @param path: A path segment.
73
74 @param request: HTTP request
75 """
76 return self
77
78
79 def errorPage(code: int, brief: str, detail: str) -> IResource:
80 """
81 Build a resource that responds to all requests with a particular HTTP
82 status code and an HTML body containing some descriptive text. This is
83 useful for rendering simple error pages.
84
85 The resource dynamically handles all paths below it. Use
86 L{IResource.putChild()} override specific path.
87
88 @param code: An integer HTTP status code which will be used for the
89 response.
90
91 @param brief: A short string which will be included in the response
92 body as the page title.
93
94 @param detail: A longer string which will be included in the
95 response body.
96
97 @returns: An L{IResource}
98 """
99 return _ErrorPage(code, brief, detail)
100
101
102 def notFound(
103 brief: str = "No Such Resource",
104 message: str = "Sorry. No luck finding that resource.",
105 ) -> IResource:
106 """
107 Generate an L{IResource} with a 404 Not Found status code.
108
109 @see: L{twisted.web.pages.errorPage}
110
111 @param brief: A short string displayed as the page title.
112
113 @param brief: A longer string displayed in the page body.
114
115 @returns: An L{IResource}
116 """
117 return _ErrorPage(http.NOT_FOUND, brief, message)
118
119
120 def forbidden(
121 brief: str = "Forbidden Resource", message: str = "Sorry, resource is forbidden."
122 ) -> IResource:
123 """
124 Generate an L{IResource} with a 403 Forbidden status code.
125
126 @see: L{twisted.web.pages.errorPage}
127
128 @param brief: A short string displayed as the page title.
129
130 @param brief: A longer string displayed in the page body.
131
132 @returns: An L{IResource}
133 """
134 return _ErrorPage(http.FORBIDDEN, brief, message)
135
[end of src/twisted/web/pages.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/twisted/web/pages.py b/src/twisted/web/pages.py
--- a/src/twisted/web/pages.py
+++ b/src/twisted/web/pages.py
@@ -83,7 +83,7 @@
useful for rendering simple error pages.
The resource dynamically handles all paths below it. Use
- L{IResource.putChild()} override specific path.
+ L{IResource.putChild()} to override a specific path.
@param code: An integer HTTP status code which will be used for the
response.
| {"golden_diff": "diff --git a/src/twisted/web/pages.py b/src/twisted/web/pages.py\n--- a/src/twisted/web/pages.py\n+++ b/src/twisted/web/pages.py\n@@ -83,7 +83,7 @@\n useful for rendering simple error pages.\n \n The resource dynamically handles all paths below it. Use\n- L{IResource.putChild()} override specific path.\n+ L{IResource.putChild()} to override a specific path.\n \n @param code: An integer HTTP status code which will be used for the\n response.\n", "issue": "twisted.web.pages.errorPage docstring has a typo\n> Use [IResource.putChild()](https://docs.twisted.org/en/stable/api/twisted.web.resource.IResource.html#putChild) override specific path.\r\n\r\nShould be:\r\n\r\n> Use [IResource.putChild()](https://docs.twisted.org/en/stable/api/twisted.web.resource.IResource.html#putChild) override **a** specific path.\n", "before_files": [{"content": "# -*- test-case-name: twisted.web.test.test_pages -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nUtility implementations of L{IResource}.\n\"\"\"\n\n__all__ = (\n \"errorPage\",\n \"notFound\",\n \"forbidden\",\n)\n\nfrom typing import cast\n\nfrom twisted.web import http\nfrom twisted.web.iweb import IRenderable, IRequest\nfrom twisted.web.resource import IResource, Resource\nfrom twisted.web.template import renderElement, tags\n\n\nclass _ErrorPage(Resource):\n \"\"\"\n L{_ErrorPage} is a resource that responds to all requests with a particular\n (parameterized) HTTP status code and an HTML body containing some\n descriptive text. This is useful for rendering simple error pages.\n\n @see: L{twisted.web.pages.errorPage}\n\n @ivar _code: An integer HTTP status code which will be used for the\n response.\n\n @ivar _brief: A short string which will be included in the response body as\n the page title.\n\n @ivar _detail: A longer string which will be included in the response body.\n \"\"\"\n\n def __init__(self, code: int, brief: str, detail: str) -> None:\n super().__init__()\n self._code: int = code\n self._brief: str = brief\n self._detail: str = detail\n\n def render(self, request: IRequest) -> object:\n \"\"\"\n Respond to all requests with the given HTTP status code and an HTML\n document containing the explanatory strings.\n \"\"\"\n request.setResponseCode(self._code)\n request.setHeader(b\"content-type\", b\"text/html; charset=utf-8\")\n return renderElement(\n request,\n # cast because the type annotations here seem off; Tag isn't an\n # IRenderable but also probably should be? See\n # https://github.com/twisted/twisted/issues/4982\n cast(\n IRenderable,\n tags.html(\n tags.head(tags.title(f\"{self._code} - {self._brief}\")),\n tags.body(tags.h1(self._brief), tags.p(self._detail)),\n ),\n ),\n )\n\n def getChild(self, path: bytes, request: IRequest) -> Resource:\n \"\"\"\n Handle all requests for which L{_ErrorPage} lacks a child by returning\n this error page.\n\n @param path: A path segment.\n\n @param request: HTTP request\n \"\"\"\n return self\n\n\ndef errorPage(code: int, brief: str, detail: str) -> IResource:\n \"\"\"\n Build a resource that responds to all requests with a particular HTTP\n status code and an HTML body containing some descriptive text. This is\n useful for rendering simple error pages.\n\n The resource dynamically handles all paths below it. 
Use\n L{IResource.putChild()} override specific path.\n\n @param code: An integer HTTP status code which will be used for the\n response.\n\n @param brief: A short string which will be included in the response\n body as the page title.\n\n @param detail: A longer string which will be included in the\n response body.\n\n @returns: An L{IResource}\n \"\"\"\n return _ErrorPage(code, brief, detail)\n\n\ndef notFound(\n brief: str = \"No Such Resource\",\n message: str = \"Sorry. No luck finding that resource.\",\n) -> IResource:\n \"\"\"\n Generate an L{IResource} with a 404 Not Found status code.\n\n @see: L{twisted.web.pages.errorPage}\n\n @param brief: A short string displayed as the page title.\n\n @param brief: A longer string displayed in the page body.\n\n @returns: An L{IResource}\n \"\"\"\n return _ErrorPage(http.NOT_FOUND, brief, message)\n\n\ndef forbidden(\n brief: str = \"Forbidden Resource\", message: str = \"Sorry, resource is forbidden.\"\n) -> IResource:\n \"\"\"\n Generate an L{IResource} with a 403 Forbidden status code.\n\n @see: L{twisted.web.pages.errorPage}\n\n @param brief: A short string displayed as the page title.\n\n @param brief: A longer string displayed in the page body.\n\n @returns: An L{IResource}\n \"\"\"\n return _ErrorPage(http.FORBIDDEN, brief, message)\n", "path": "src/twisted/web/pages.py"}]} | 1,883 | 122 |
gh_patches_debug_40353 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-contrib-1758 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Only collect `db.statement` if there is sanitization
Spec https://github.com/open-telemetry/opentelemetry-specification/pull/3127
- [ ] [aiopg](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-aiopg)
- [ ] [asyncpg](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-asyncpg)
- [ ] [dbapi](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-dbapi)
- [ ] [elasticsearch](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-elasticsearch)
- [ ] [mysql](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-mysql)
- [ ] [pymemcache](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-pymemcache)
- [ ] [pymongo](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-pymongo)
- [ ] [pymysql](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-pymysql)
- [ ] [redis](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-redis)
- [ ] [sqlalchemy](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-sqlalchemy)
- [ ] [sqlite3](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-sqlite3)
</issue>
<code>
[start of instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 sanitized_keys = (
16 "message",
17 "should",
18 "filter",
19 "query",
20 "queries",
21 "intervals",
22 "match",
23 )
24 sanitized_value = "?"
25
26
27 # pylint: disable=C0103
28 def _flatten_dict(d, parent_key=""):
29 items = []
30 for k, v in d.items():
31 new_key = parent_key + "." + k if parent_key else k
32 if isinstance(v, dict):
33 items.extend(_flatten_dict(v, new_key).items())
34 else:
35 items.append((new_key, v))
36 return dict(items)
37
38
39 def _unflatten_dict(d):
40 res = {}
41 for k, v in d.items():
42 keys = k.split(".")
43 d = res
44 for key in keys[:-1]:
45 if key not in d:
46 d[key] = {}
47 d = d[key]
48 d[keys[-1]] = v
49 return res
50
51
52 def sanitize_body(body) -> str:
53 flatten_body = _flatten_dict(body)
54
55 for key in flatten_body:
56 if key.endswith(sanitized_keys):
57 flatten_body[key] = sanitized_value
58
59 return str(_unflatten_dict(flatten_body))
60
[end of instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py]
[start of instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 This library allows tracing HTTP elasticsearch made by the
17 `elasticsearch <https://elasticsearch-py.readthedocs.io/en/master/>`_ library.
18
19 Usage
20 -----
21
22 .. code-block:: python
23
24 from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor
25 import elasticsearch
26
27
28 # instrument elasticsearch
29 ElasticsearchInstrumentor().instrument()
30
31 # Using elasticsearch as normal now will automatically generate spans
32 es = elasticsearch.Elasticsearch()
33 es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})
34 es.get(index='my-index', doc_type='my-type', id=1)
35
36 Elasticsearch instrumentation prefixes operation names with the string "Elasticsearch". This
37 can be changed to a different string by either setting the OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX
38 environment variable or by passing the prefix as an argument to the instrumentor. For example,
39
40
41 .. code-block:: python
42
43 ElasticsearchInstrumentor("my-custom-prefix").instrument()
44
45 The instrument() method accepts the following keyword args:
46 tracer_provider (TracerProvider) - an optional tracer provider
47 sanitize_query (bool) - an optional query sanitization flag
48 request_hook (Callable) - a function with extra user-defined logic to be performed before performing the request
49 this function signature is:
50 def request_hook(span: Span, method: str, url: str, kwargs)
51
52 response_hook (Callable) - a function with extra user-defined logic to be performed after performing the request
53 this function signature is:
54 def response_hook(span: Span, response: dict)
55
56 for example:
57
58 .. code: python
59
60 from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor
61 import elasticsearch
62
63 def request_hook(span, method, url, kwargs):
64 if span and span.is_recording():
65 span.set_attribute("custom_user_attribute_from_request_hook", "some-value")
66
67 def response_hook(span, response):
68 if span and span.is_recording():
69 span.set_attribute("custom_user_attribute_from_response_hook", "some-value")
70
71 # instrument elasticsearch with request and response hooks
72 ElasticsearchInstrumentor().instrument(request_hook=request_hook, response_hook=response_hook)
73
74 # Using elasticsearch as normal now will automatically generate spans,
75 # including user custom attributes added from the hooks
76 es = elasticsearch.Elasticsearch()
77 es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})
78 es.get(index='my-index', doc_type='my-type', id=1)
79
80 API
81 ---
82 """
83
84 import re
85 from logging import getLogger
86 from os import environ
87 from typing import Collection
88
89 import elasticsearch
90 import elasticsearch.exceptions
91 from wrapt import wrap_function_wrapper as _wrap
92
93 from opentelemetry.instrumentation.elasticsearch.package import _instruments
94 from opentelemetry.instrumentation.elasticsearch.version import __version__
95 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
96 from opentelemetry.instrumentation.utils import unwrap
97 from opentelemetry.semconv.trace import SpanAttributes
98 from opentelemetry.trace import SpanKind, get_tracer
99
100 from .utils import sanitize_body
101
102 logger = getLogger(__name__)
103
104
105 # Values to add as tags from the actual
106 # payload returned by Elasticsearch, if any.
107 _ATTRIBUTES_FROM_RESULT = [
108 "found",
109 "timed_out",
110 "took",
111 ]
112
113 _DEFAULT_OP_NAME = "request"
114
115
116 class ElasticsearchInstrumentor(BaseInstrumentor):
117 """An instrumentor for elasticsearch
118 See `BaseInstrumentor`
119 """
120
121 def __init__(self, span_name_prefix=None):
122 if not span_name_prefix:
123 span_name_prefix = environ.get(
124 "OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX",
125 "Elasticsearch",
126 )
127 self._span_name_prefix = span_name_prefix.strip()
128 super().__init__()
129
130 def instrumentation_dependencies(self) -> Collection[str]:
131 return _instruments
132
133 def _instrument(self, **kwargs):
134 """
135 Instruments Elasticsearch module
136 """
137 tracer_provider = kwargs.get("tracer_provider")
138 tracer = get_tracer(__name__, __version__, tracer_provider)
139 request_hook = kwargs.get("request_hook")
140 response_hook = kwargs.get("response_hook")
141 sanitize_query = kwargs.get("sanitize_query", False)
142 _wrap(
143 elasticsearch,
144 "Transport.perform_request",
145 _wrap_perform_request(
146 tracer,
147 sanitize_query,
148 self._span_name_prefix,
149 request_hook,
150 response_hook,
151 ),
152 )
153
154 def _uninstrument(self, **kwargs):
155 unwrap(elasticsearch.Transport, "perform_request")
156
157
158 _regex_doc_url = re.compile(r"/_doc/([^/]+)")
159
160 # search api https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html
161 _regex_search_url = re.compile(r"/([^/]+)/_search[/]?")
162
163
164 def _wrap_perform_request(
165 tracer,
166 sanitize_query,
167 span_name_prefix,
168 request_hook=None,
169 response_hook=None,
170 ):
171 # pylint: disable=R0912,R0914
172 def wrapper(wrapped, _, args, kwargs):
173 method = url = None
174 try:
175 method, url, *_ = args
176 except IndexError:
177 logger.warning(
178 "expected perform_request to receive two positional arguments. "
179 "Got %d",
180 len(args),
181 )
182
183 op_name = span_name_prefix + (url or method or _DEFAULT_OP_NAME)
184
185 doc_id = None
186 search_target = None
187
188 if url:
189 # TODO: This regex-based solution avoids creating an unbounded number of span names, but should be replaced by instrumenting individual Elasticsearch methods instead of Transport.perform_request()
190 # A limitation of the regex is that only the '_doc' mapping type is supported. Mapping types are deprecated since Elasticsearch 7
191 # https://github.com/open-telemetry/opentelemetry-python-contrib/issues/708
192 match = _regex_doc_url.search(url)
193 if match is not None:
194 # Remove the full document ID from the URL
195 doc_span = match.span()
196 op_name = (
197 span_name_prefix
198 + url[: doc_span[0]]
199 + "/_doc/:id"
200 + url[doc_span[1] :]
201 )
202 # Put the document ID in attributes
203 doc_id = match.group(1)
204 match = _regex_search_url.search(url)
205 if match is not None:
206 op_name = span_name_prefix + "/<target>/_search"
207 search_target = match.group(1)
208
209 params = kwargs.get("params", {})
210 body = kwargs.get("body", None)
211
212 with tracer.start_as_current_span(
213 op_name,
214 kind=SpanKind.CLIENT,
215 ) as span:
216 if callable(request_hook):
217 request_hook(span, method, url, kwargs)
218
219 if span.is_recording():
220 attributes = {
221 SpanAttributes.DB_SYSTEM: "elasticsearch",
222 }
223 if url:
224 attributes["elasticsearch.url"] = url
225 if method:
226 attributes["elasticsearch.method"] = method
227 if body:
228 statement = str(body)
229 if sanitize_query:
230 statement = sanitize_body(body)
231 attributes[SpanAttributes.DB_STATEMENT] = statement
232 if params:
233 attributes["elasticsearch.params"] = str(params)
234 if doc_id:
235 attributes["elasticsearch.id"] = doc_id
236 if search_target:
237 attributes["elasticsearch.target"] = search_target
238 for key, value in attributes.items():
239 span.set_attribute(key, value)
240
241 rv = wrapped(*args, **kwargs)
242 if isinstance(rv, dict) and span.is_recording():
243 for member in _ATTRIBUTES_FROM_RESULT:
244 if member in rv:
245 span.set_attribute(
246 f"elasticsearch.{member}",
247 str(rv[member]),
248 )
249
250 if callable(response_hook):
251 response_hook(span, rv)
252 return rv
253
254 return wrapper
255
[end of instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py
@@ -44,7 +44,6 @@
The instrument() method accepts the following keyword args:
tracer_provider (TracerProvider) - an optional tracer provider
-sanitize_query (bool) - an optional query sanitization flag
request_hook (Callable) - a function with extra user-defined logic to be performed before performing the request
this function signature is:
def request_hook(span: Span, method: str, url: str, kwargs)
@@ -138,13 +137,11 @@
tracer = get_tracer(__name__, __version__, tracer_provider)
request_hook = kwargs.get("request_hook")
response_hook = kwargs.get("response_hook")
- sanitize_query = kwargs.get("sanitize_query", False)
_wrap(
elasticsearch,
"Transport.perform_request",
_wrap_perform_request(
tracer,
- sanitize_query,
self._span_name_prefix,
request_hook,
response_hook,
@@ -163,7 +160,6 @@
def _wrap_perform_request(
tracer,
- sanitize_query,
span_name_prefix,
request_hook=None,
response_hook=None,
@@ -225,10 +221,9 @@
if method:
attributes["elasticsearch.method"] = method
if body:
- statement = str(body)
- if sanitize_query:
- statement = sanitize_body(body)
- attributes[SpanAttributes.DB_STATEMENT] = statement
+ attributes[SpanAttributes.DB_STATEMENT] = sanitize_body(
+ body
+ )
if params:
attributes["elasticsearch.params"] = str(params)
if doc_id:
diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py
--- a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py
+++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py
@@ -29,7 +29,8 @@
items = []
for k, v in d.items():
new_key = parent_key + "." + k if parent_key else k
- if isinstance(v, dict):
+ # recursive call _flatten_dict for a non-empty dict value
+ if isinstance(v, dict) and v:
items.extend(_flatten_dict(v, new_key).items())
else:
items.append((new_key, v))
| {"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py\n@@ -44,7 +44,6 @@\n \n The instrument() method accepts the following keyword args:\n tracer_provider (TracerProvider) - an optional tracer provider\n-sanitize_query (bool) - an optional query sanitization flag\n request_hook (Callable) - a function with extra user-defined logic to be performed before performing the request\n this function signature is:\n def request_hook(span: Span, method: str, url: str, kwargs)\n@@ -138,13 +137,11 @@\n tracer = get_tracer(__name__, __version__, tracer_provider)\n request_hook = kwargs.get(\"request_hook\")\n response_hook = kwargs.get(\"response_hook\")\n- sanitize_query = kwargs.get(\"sanitize_query\", False)\n _wrap(\n elasticsearch,\n \"Transport.perform_request\",\n _wrap_perform_request(\n tracer,\n- sanitize_query,\n self._span_name_prefix,\n request_hook,\n response_hook,\n@@ -163,7 +160,6 @@\n \n def _wrap_perform_request(\n tracer,\n- sanitize_query,\n span_name_prefix,\n request_hook=None,\n response_hook=None,\n@@ -225,10 +221,9 @@\n if method:\n attributes[\"elasticsearch.method\"] = method\n if body:\n- statement = str(body)\n- if sanitize_query:\n- statement = sanitize_body(body)\n- attributes[SpanAttributes.DB_STATEMENT] = statement\n+ attributes[SpanAttributes.DB_STATEMENT] = sanitize_body(\n+ body\n+ )\n if params:\n attributes[\"elasticsearch.params\"] = str(params)\n if doc_id:\ndiff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py\n--- a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py\n+++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py\n@@ -29,7 +29,8 @@\n items = []\n for k, v in d.items():\n new_key = parent_key + \".\" + k if parent_key else k\n- if isinstance(v, dict):\n+ # recursive call _flatten_dict for a non-empty dict value\n+ if isinstance(v, dict) and v:\n items.extend(_flatten_dict(v, new_key).items())\n else:\n items.append((new_key, v))\n", "issue": "Only collect `db.statement` if there is sanitization\nSpec https://github.com/open-telemetry/opentelemetry-specification/pull/3127\r\n- [ ] [aiopg](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-aiopg)\r\n- [ ] [asyncpg](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-asyncpg)\r\n- [ ] [dbapi](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-dbapi)\r\n- [ ] [elasticsearch](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-elasticsearch)\r\n- [ ] 
[mysql](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-mysql)\r\n- [ ] [pymemcache](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-pymemcache)\r\n- [ ] [pymongo](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-pymongo)\r\n- [ ] [pymysql](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-pymysql)\r\n- [ ] [redis](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-redis)\r\n- [ ] [sqlalchemy](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-sqlalchemy)\r\n- [ ] [sqlite3](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-sqlite3)\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nsanitized_keys = (\n \"message\",\n \"should\",\n \"filter\",\n \"query\",\n \"queries\",\n \"intervals\",\n \"match\",\n)\nsanitized_value = \"?\"\n\n\n# pylint: disable=C0103\ndef _flatten_dict(d, parent_key=\"\"):\n items = []\n for k, v in d.items():\n new_key = parent_key + \".\" + k if parent_key else k\n if isinstance(v, dict):\n items.extend(_flatten_dict(v, new_key).items())\n else:\n items.append((new_key, v))\n return dict(items)\n\n\ndef _unflatten_dict(d):\n res = {}\n for k, v in d.items():\n keys = k.split(\".\")\n d = res\n for key in keys[:-1]:\n if key not in d:\n d[key] = {}\n d = d[key]\n d[keys[-1]] = v\n return res\n\n\ndef sanitize_body(body) -> str:\n flatten_body = _flatten_dict(body)\n\n for key in flatten_body:\n if key.endswith(sanitized_keys):\n flatten_body[key] = sanitized_value\n\n return str(_unflatten_dict(flatten_body))\n", "path": "instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis library allows tracing HTTP elasticsearch made by the\n`elasticsearch <https://elasticsearch-py.readthedocs.io/en/master/>`_ library.\n\nUsage\n-----\n\n.. 
code-block:: python\n\n from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor\n import elasticsearch\n\n\n # instrument elasticsearch\n ElasticsearchInstrumentor().instrument()\n\n # Using elasticsearch as normal now will automatically generate spans\n es = elasticsearch.Elasticsearch()\n es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})\n es.get(index='my-index', doc_type='my-type', id=1)\n\nElasticsearch instrumentation prefixes operation names with the string \"Elasticsearch\". This\ncan be changed to a different string by either setting the OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX\nenvironment variable or by passing the prefix as an argument to the instrumentor. For example,\n\n\n.. code-block:: python\n\n ElasticsearchInstrumentor(\"my-custom-prefix\").instrument()\n\nThe instrument() method accepts the following keyword args:\ntracer_provider (TracerProvider) - an optional tracer provider\nsanitize_query (bool) - an optional query sanitization flag\nrequest_hook (Callable) - a function with extra user-defined logic to be performed before performing the request\nthis function signature is:\ndef request_hook(span: Span, method: str, url: str, kwargs)\n\nresponse_hook (Callable) - a function with extra user-defined logic to be performed after performing the request\nthis function signature is:\ndef response_hook(span: Span, response: dict)\n\nfor example:\n\n.. code: python\n\n from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor\n import elasticsearch\n\n def request_hook(span, method, url, kwargs):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_request_hook\", \"some-value\")\n\n def response_hook(span, response):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_response_hook\", \"some-value\")\n\n # instrument elasticsearch with request and response hooks\n ElasticsearchInstrumentor().instrument(request_hook=request_hook, response_hook=response_hook)\n\n # Using elasticsearch as normal now will automatically generate spans,\n # including user custom attributes added from the hooks\n es = elasticsearch.Elasticsearch()\n es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})\n es.get(index='my-index', doc_type='my-type', id=1)\n\nAPI\n---\n\"\"\"\n\nimport re\nfrom logging import getLogger\nfrom os import environ\nfrom typing import Collection\n\nimport elasticsearch\nimport elasticsearch.exceptions\nfrom wrapt import wrap_function_wrapper as _wrap\n\nfrom opentelemetry.instrumentation.elasticsearch.package import _instruments\nfrom opentelemetry.instrumentation.elasticsearch.version import __version__\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.utils import unwrap\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace import SpanKind, get_tracer\n\nfrom .utils import sanitize_body\n\nlogger = getLogger(__name__)\n\n\n# Values to add as tags from the actual\n# payload returned by Elasticsearch, if any.\n_ATTRIBUTES_FROM_RESULT = [\n \"found\",\n \"timed_out\",\n \"took\",\n]\n\n_DEFAULT_OP_NAME = \"request\"\n\n\nclass ElasticsearchInstrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for elasticsearch\n See `BaseInstrumentor`\n \"\"\"\n\n def __init__(self, span_name_prefix=None):\n if not span_name_prefix:\n span_name_prefix = environ.get(\n 
\"OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX\",\n \"Elasticsearch\",\n )\n self._span_name_prefix = span_name_prefix.strip()\n super().__init__()\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n \"\"\"\n Instruments Elasticsearch module\n \"\"\"\n tracer_provider = kwargs.get(\"tracer_provider\")\n tracer = get_tracer(__name__, __version__, tracer_provider)\n request_hook = kwargs.get(\"request_hook\")\n response_hook = kwargs.get(\"response_hook\")\n sanitize_query = kwargs.get(\"sanitize_query\", False)\n _wrap(\n elasticsearch,\n \"Transport.perform_request\",\n _wrap_perform_request(\n tracer,\n sanitize_query,\n self._span_name_prefix,\n request_hook,\n response_hook,\n ),\n )\n\n def _uninstrument(self, **kwargs):\n unwrap(elasticsearch.Transport, \"perform_request\")\n\n\n_regex_doc_url = re.compile(r\"/_doc/([^/]+)\")\n\n# search api https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html\n_regex_search_url = re.compile(r\"/([^/]+)/_search[/]?\")\n\n\ndef _wrap_perform_request(\n tracer,\n sanitize_query,\n span_name_prefix,\n request_hook=None,\n response_hook=None,\n):\n # pylint: disable=R0912,R0914\n def wrapper(wrapped, _, args, kwargs):\n method = url = None\n try:\n method, url, *_ = args\n except IndexError:\n logger.warning(\n \"expected perform_request to receive two positional arguments. \"\n \"Got %d\",\n len(args),\n )\n\n op_name = span_name_prefix + (url or method or _DEFAULT_OP_NAME)\n\n doc_id = None\n search_target = None\n\n if url:\n # TODO: This regex-based solution avoids creating an unbounded number of span names, but should be replaced by instrumenting individual Elasticsearch methods instead of Transport.perform_request()\n # A limitation of the regex is that only the '_doc' mapping type is supported. 
Mapping types are deprecated since Elasticsearch 7\n # https://github.com/open-telemetry/opentelemetry-python-contrib/issues/708\n match = _regex_doc_url.search(url)\n if match is not None:\n # Remove the full document ID from the URL\n doc_span = match.span()\n op_name = (\n span_name_prefix\n + url[: doc_span[0]]\n + \"/_doc/:id\"\n + url[doc_span[1] :]\n )\n # Put the document ID in attributes\n doc_id = match.group(1)\n match = _regex_search_url.search(url)\n if match is not None:\n op_name = span_name_prefix + \"/<target>/_search\"\n search_target = match.group(1)\n\n params = kwargs.get(\"params\", {})\n body = kwargs.get(\"body\", None)\n\n with tracer.start_as_current_span(\n op_name,\n kind=SpanKind.CLIENT,\n ) as span:\n if callable(request_hook):\n request_hook(span, method, url, kwargs)\n\n if span.is_recording():\n attributes = {\n SpanAttributes.DB_SYSTEM: \"elasticsearch\",\n }\n if url:\n attributes[\"elasticsearch.url\"] = url\n if method:\n attributes[\"elasticsearch.method\"] = method\n if body:\n statement = str(body)\n if sanitize_query:\n statement = sanitize_body(body)\n attributes[SpanAttributes.DB_STATEMENT] = statement\n if params:\n attributes[\"elasticsearch.params\"] = str(params)\n if doc_id:\n attributes[\"elasticsearch.id\"] = doc_id\n if search_target:\n attributes[\"elasticsearch.target\"] = search_target\n for key, value in attributes.items():\n span.set_attribute(key, value)\n\n rv = wrapped(*args, **kwargs)\n if isinstance(rv, dict) and span.is_recording():\n for member in _ATTRIBUTES_FROM_RESULT:\n if member in rv:\n span.set_attribute(\n f\"elasticsearch.{member}\",\n str(rv[member]),\n )\n\n if callable(response_hook):\n response_hook(span, rv)\n return rv\n\n return wrapper\n", "path": "instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py"}]} | 4,020 | 662 |
gh_patches_debug_38939 | rasdani/github-patches | git_diff | AlexsLemonade__refinebio-3363 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Clean up AWS Batch job definition list
### Problem or idea
The Batch job definition section contains 100+ pages of jobs (~2500 items). They haven't been cleaned up properly during the deploy process for a while.
### Solution or next step
Clean up stale items and make sure the job deregistering script takes care of old job definitions in the right way.
</issue>
<code>
[start of infrastructure/delete_batch_job_queue.py]
1 import os
2 from time import sleep
3
4 import boto3
5
6 AWS_REGION = os.environ["AWS_REGION"]
7 AWS_BATCH_QUEUE_ALL_NAMES = os.environ["REFINEBIO_JOB_QUEUE_ALL_NAMES"].split(",")
8
9 batch = boto3.client("batch", region_name=AWS_REGION)
10
11 # First disable each job queue.
12 for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:
13 try:
14 batch.update_job_queue(jobQueue=batch_queue_name, state="DISABLED")
15 except Exception as e:
16 # If the job queue doesn't exist, that's cool, we were trying to delete it anyway.
17 pass
18
19 # Then wait for each one to be disabled so it can be deleted.
20 for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:
21 while True:
22 job_queues = batch.describe_job_queues(jobQueues=[batch_queue_name])
23 if "jobQueues" in job_queues:
24 job_queue = job_queues["jobQueues"][0]
25 if job_queue["state"] == "DISABLED" and job_queue["status"] != "UPDATING":
26 break
27 else:
28 print(f"Unexpected response while describing job queue {batch_queue_name}.")
29 break
30
31 sleep(3)
32
33 batch.delete_job_queue(jobQueue=batch_queue_name)
34
[end of infrastructure/delete_batch_job_queue.py]
[start of infrastructure/deregister_batch_job_definitions.py]
1 import os
2
3 import boto3
4
5 AWS_REGION = os.environ["AWS_REGION"]
6
7 batch = boto3.client("batch", region_name=AWS_REGION)
8
9 # TODO: stop repeating this construction everywhere. Just set it once somewhere.
10 JOB_DEFINITION_PREFIX = os.environ["USER"] + "_" + os.environ["STAGE"] + "_"
11
12 job_definition_files = os.listdir("batch-job-templates")
13
14 job_definition_list = list(
15 {JOB_DEFINITION_PREFIX + job_def.upper().split(".")[0] for job_def in job_definition_files}
16 )
17
18 # Have to go one by one because providing a list of job names doesn't work:
19 # https://github.com/boto/boto3/issues/2908
20 for job_definition in job_definition_list:
21 job_definitions = batch.describe_job_definitions(
22 jobDefinitionName=job_definition, status="ACTIVE"
23 )
24 # There can be multiple revisions per job definition. We want them all gone.
25 for job_definition_revision in job_definitions["jobDefinitions"]:
26 batch.deregister_job_definition(jobDefinition=job_definition_revision["jobDefinitionArn"])
27
[end of infrastructure/deregister_batch_job_definitions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/infrastructure/delete_batch_job_queue.py b/infrastructure/delete_batch_job_queue.py
--- a/infrastructure/delete_batch_job_queue.py
+++ b/infrastructure/delete_batch_job_queue.py
@@ -2,19 +2,22 @@
from time import sleep
import boto3
+from botocore.exceptions import ClientError
-AWS_REGION = os.environ["AWS_REGION"]
AWS_BATCH_QUEUE_ALL_NAMES = os.environ["REFINEBIO_JOB_QUEUE_ALL_NAMES"].split(",")
-batch = boto3.client("batch", region_name=AWS_REGION)
+batch = boto3.client("batch", region_name=os.environ["AWS_REGION"])
# First disable each job queue.
for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:
try:
batch.update_job_queue(jobQueue=batch_queue_name, state="DISABLED")
- except Exception as e:
+ except ClientError as e:
# If the job queue doesn't exist, that's cool, we were trying to delete it anyway.
- pass
+ if str(e).endswith(" does not exist."):
+ pass
+ else:
+ raise e
# Then wait for each one to be disabled so it can be deleted.
for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:
diff --git a/infrastructure/deregister_batch_job_definitions.py b/infrastructure/deregister_batch_job_definitions.py
--- a/infrastructure/deregister_batch_job_definitions.py
+++ b/infrastructure/deregister_batch_job_definitions.py
@@ -2,25 +2,36 @@
import boto3
-AWS_REGION = os.environ["AWS_REGION"]
-
-batch = boto3.client("batch", region_name=AWS_REGION)
+batch = boto3.client("batch", region_name=os.environ["AWS_REGION"])
# TODO: stop repeating this construction everywhere. Just set it once somewhere.
JOB_DEFINITION_PREFIX = os.environ["USER"] + "_" + os.environ["STAGE"] + "_"
-job_definition_files = os.listdir("batch-job-templates")
-
-job_definition_list = list(
- {JOB_DEFINITION_PREFIX + job_def.upper().split(".")[0] for job_def in job_definition_files}
+job_names = (
+ JOB_DEFINITION_PREFIX + batch_job_template.upper().split(".")[0]
+ for batch_job_template in os.listdir("batch-job-templates")
)
+nextToken = ""
# Have to go one by one because providing a list of job names doesn't work:
# https://github.com/boto/boto3/issues/2908
-for job_definition in job_definition_list:
- job_definitions = batch.describe_job_definitions(
- jobDefinitionName=job_definition, status="ACTIVE"
- )
- # There can be multiple revisions per job definition. We want them all gone.
- for job_definition_revision in job_definitions["jobDefinitions"]:
- batch.deregister_job_definition(jobDefinition=job_definition_revision["jobDefinitionArn"])
+for job_name in sorted(job_names):
+ while True:
+ data = {
+ "jobDefinitionName": job_name,
+ "maxResults": 100,
+ "status": "ACTIVE",
+ }
+ if nextToken:
+ data["nextToken"] = nextToken
+
+ response = batch.describe_job_definitions(**data)
+ nextToken = response.get("nextToken", "")
+
+ job_definitions = response.get("jobDefinitions")
+ if not job_definitions:
+ break
+
+ # There can be multiple revisions per job definition. We want them all gone.
+ for job_definition in job_definitions:
+ batch.deregister_job_definition(jobDefinition=job_definition["jobDefinitionArn"])
| {"golden_diff": "diff --git a/infrastructure/delete_batch_job_queue.py b/infrastructure/delete_batch_job_queue.py\n--- a/infrastructure/delete_batch_job_queue.py\n+++ b/infrastructure/delete_batch_job_queue.py\n@@ -2,19 +2,22 @@\n from time import sleep\n \n import boto3\n+from botocore.exceptions import ClientError\n \n-AWS_REGION = os.environ[\"AWS_REGION\"]\n AWS_BATCH_QUEUE_ALL_NAMES = os.environ[\"REFINEBIO_JOB_QUEUE_ALL_NAMES\"].split(\",\")\n \n-batch = boto3.client(\"batch\", region_name=AWS_REGION)\n+batch = boto3.client(\"batch\", region_name=os.environ[\"AWS_REGION\"])\n \n # First disable each job queue.\n for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:\n try:\n batch.update_job_queue(jobQueue=batch_queue_name, state=\"DISABLED\")\n- except Exception as e:\n+ except ClientError as e:\n # If the job queue doesn't exist, that's cool, we were trying to delete it anyway.\n- pass\n+ if str(e).endswith(\" does not exist.\"):\n+ pass\n+ else:\n+ raise e\n \n # Then wait for each one to be disabled so it can be deleted.\n for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:\ndiff --git a/infrastructure/deregister_batch_job_definitions.py b/infrastructure/deregister_batch_job_definitions.py\n--- a/infrastructure/deregister_batch_job_definitions.py\n+++ b/infrastructure/deregister_batch_job_definitions.py\n@@ -2,25 +2,36 @@\n \n import boto3\n \n-AWS_REGION = os.environ[\"AWS_REGION\"]\n-\n-batch = boto3.client(\"batch\", region_name=AWS_REGION)\n+batch = boto3.client(\"batch\", region_name=os.environ[\"AWS_REGION\"])\n \n # TODO: stop repeating this construction everywhere. Just set it once somewhere.\n JOB_DEFINITION_PREFIX = os.environ[\"USER\"] + \"_\" + os.environ[\"STAGE\"] + \"_\"\n \n-job_definition_files = os.listdir(\"batch-job-templates\")\n-\n-job_definition_list = list(\n- {JOB_DEFINITION_PREFIX + job_def.upper().split(\".\")[0] for job_def in job_definition_files}\n+job_names = (\n+ JOB_DEFINITION_PREFIX + batch_job_template.upper().split(\".\")[0]\n+ for batch_job_template in os.listdir(\"batch-job-templates\")\n )\n+nextToken = \"\"\n \n # Have to go one by one because providing a list of job names doesn't work:\n # https://github.com/boto/boto3/issues/2908\n-for job_definition in job_definition_list:\n- job_definitions = batch.describe_job_definitions(\n- jobDefinitionName=job_definition, status=\"ACTIVE\"\n- )\n- # There can be multiple revisions per job definition. We want them all gone.\n- for job_definition_revision in job_definitions[\"jobDefinitions\"]:\n- batch.deregister_job_definition(jobDefinition=job_definition_revision[\"jobDefinitionArn\"])\n+for job_name in sorted(job_names):\n+ while True:\n+ data = {\n+ \"jobDefinitionName\": job_name,\n+ \"maxResults\": 100,\n+ \"status\": \"ACTIVE\",\n+ }\n+ if nextToken:\n+ data[\"nextToken\"] = nextToken\n+\n+ response = batch.describe_job_definitions(**data)\n+ nextToken = response.get(\"nextToken\", \"\")\n+\n+ job_definitions = response.get(\"jobDefinitions\")\n+ if not job_definitions:\n+ break\n+\n+ # There can be multiple revisions per job definition. We want them all gone.\n+ for job_definition in job_definitions:\n+ batch.deregister_job_definition(jobDefinition=job_definition[\"jobDefinitionArn\"])\n", "issue": "Clean up AWS Batch job definition list\n### Problem or idea\r\n\r\nThe Batch job definition section contains 100+ pages of jobs (~2500 items). 
They haven't been cleaned up properly during deploy process for a while.\r\n\r\n\r\n### Solution or next step\r\n\r\nClean up stale items, make sure job deregistering script takes care of old job definitions in a right way.\r\n\n", "before_files": [{"content": "import os\nfrom time import sleep\n\nimport boto3\n\nAWS_REGION = os.environ[\"AWS_REGION\"]\nAWS_BATCH_QUEUE_ALL_NAMES = os.environ[\"REFINEBIO_JOB_QUEUE_ALL_NAMES\"].split(\",\")\n\nbatch = boto3.client(\"batch\", region_name=AWS_REGION)\n\n# First disable each job queue.\nfor batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:\n try:\n batch.update_job_queue(jobQueue=batch_queue_name, state=\"DISABLED\")\n except Exception as e:\n # If the job queue doesn't exist, that's cool, we were trying to delete it anyway.\n pass\n\n# Then wait for each one to be disabled so it can be deleted.\nfor batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:\n while True:\n job_queues = batch.describe_job_queues(jobQueues=[batch_queue_name])\n if \"jobQueues\" in job_queues:\n job_queue = job_queues[\"jobQueues\"][0]\n if job_queue[\"state\"] == \"DISABLED\" and job_queue[\"status\"] != \"UPDATING\":\n break\n else:\n print(f\"Unexpected response while describing job queue {batch_queue_name}.\")\n break\n\n sleep(3)\n\n batch.delete_job_queue(jobQueue=batch_queue_name)\n", "path": "infrastructure/delete_batch_job_queue.py"}, {"content": "import os\n\nimport boto3\n\nAWS_REGION = os.environ[\"AWS_REGION\"]\n\nbatch = boto3.client(\"batch\", region_name=AWS_REGION)\n\n# TODO: stop repeating this construction everywhere. Just set it once somewhere.\nJOB_DEFINITION_PREFIX = os.environ[\"USER\"] + \"_\" + os.environ[\"STAGE\"] + \"_\"\n\njob_definition_files = os.listdir(\"batch-job-templates\")\n\njob_definition_list = list(\n {JOB_DEFINITION_PREFIX + job_def.upper().split(\".\")[0] for job_def in job_definition_files}\n)\n\n# Have to go one by one because providing a list of job names doesn't work:\n# https://github.com/boto/boto3/issues/2908\nfor job_definition in job_definition_list:\n job_definitions = batch.describe_job_definitions(\n jobDefinitionName=job_definition, status=\"ACTIVE\"\n )\n # There can be multiple revisions per job definition. We want them all gone.\n for job_definition_revision in job_definitions[\"jobDefinitions\"]:\n batch.deregister_job_definition(jobDefinition=job_definition_revision[\"jobDefinitionArn\"])\n", "path": "infrastructure/deregister_batch_job_definitions.py"}]} | 1,237 | 786 |
gh_patches_debug_10043 | rasdani/github-patches | git_diff | nautobot__nautobot-877 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Migrate user, social auth, and system settings from Django Admin to be first-class citizens in UI/API
### Proposed Functionality
Before the launch of Nautobot, there was significant work to migrate functionality from Django Admin into core and create first-class views and APIs for webhooks, custom links, and export templates. Custom fields are also coming soon in #229. This proposes doing the same for everything in the Users, Python Social Auth, and System sections in Django Admin.
### Use Cases
As Patti the Platform Admin,
I want to have the ability to manage users, groups, and permissions from the Nautobot UI without going into Django Admin,
So that there is a more consistent user experience for my team as well as APIs for anything else that doesn't have an API currently within Django Admin, e.g. Social Auth, retrieving background tasks, etc.
One option is to create an "Admin" dropdown in the navigation bar which contains "Users (no change)," "Social Auth (drop 'Python')," and "System" sections. We may need one additional section called "plugins" for when plugins have created entries in Django Admin.
I will know this is done when it is possible to:
* Manage what is currently under Users, Python Social Auth, and System sections in Django Admin in the main Nautobot UI
* Manage what is currently under Users, Python Social Auth, and System sections in Django Admin through Nautobot REST API calls
</issue>
<code>
[start of nautobot/core/admin.py]
1 from django.conf import settings
2 from django.contrib.admin import site as admin_site
3 from taggit.models import Tag
4
5
6 # Override default AdminSite attributes so we can avoid creating and
7 # registering our own class
8 admin_site.site_header = "Nautobot Administration"
9 admin_site.site_title = "Nautobot"
10 admin_site.index_template = "admin/nautobot_index.html"
11
12 # Unregister the unused stock Tag model provided by django-taggit
13 admin_site.unregister(Tag)
14
[end of nautobot/core/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nautobot/core/admin.py b/nautobot/core/admin.py
--- a/nautobot/core/admin.py
+++ b/nautobot/core/admin.py
@@ -1,5 +1,6 @@
from django.conf import settings
from django.contrib.admin import site as admin_site
+from social_django.models import Association, Nonce, UserSocialAuth
from taggit.models import Tag
@@ -11,3 +12,8 @@
# Unregister the unused stock Tag model provided by django-taggit
admin_site.unregister(Tag)
+
+# Unregister SocialAuth from Django admin menu
+admin_site.unregister(Association)
+admin_site.unregister(Nonce)
+admin_site.unregister(UserSocialAuth)
| {"golden_diff": "diff --git a/nautobot/core/admin.py b/nautobot/core/admin.py\n--- a/nautobot/core/admin.py\n+++ b/nautobot/core/admin.py\n@@ -1,5 +1,6 @@\n from django.conf import settings\n from django.contrib.admin import site as admin_site\n+from social_django.models import Association, Nonce, UserSocialAuth\n from taggit.models import Tag\n \n \n@@ -11,3 +12,8 @@\n \n # Unregister the unused stock Tag model provided by django-taggit\n admin_site.unregister(Tag)\n+\n+# Unregister SocialAuth from Django admin menu\n+admin_site.unregister(Association)\n+admin_site.unregister(Nonce)\n+admin_site.unregister(UserSocialAuth)\n", "issue": "Migrate user, social auth, and system settings from Django Admin to be first-class citizens in UI/API\n### Proposed Functionality \r\n\r\nBefore the launch of Nautobot, there was significant work to migrate functionality from Django Admin into core and create first-class views and APIs for webhooks, custom links, and export templates. Custom fields are also coming soon in #229. This proposes doing the same for everything in the Users, Python Social Auth, and System sections in Django Admin.\r\n\r\n### Use Cases\r\n\r\nAs Patti the Platform Admin,\r\nI want to have the ability to manage users, groups, and permissions from the Nautobot UI without going into Django Admin,\r\nSo that there is a more consistent user experience for my team as well as APIs for anything else that doesn't have an API currently within Django Admin, e.g. Social Auth, retrieving background tasks, etc. \r\n\r\nOne option is to create an \"Admin\" dropdown in the navigation bar which contains \"Users (no change),\" \"Social Auth (drop 'Python'),\" and \"System\" sections. We may need one additional section called \"plugins\" for when plugins have created entries in Django Admin.\r\n\r\nI will know this is done when it is possible to:\r\n* Manage what is currently under Users, Python Social Auth, and System sections in Django Admin in the main Nautobot UI\r\n* Manage what is currently under Users, Python Social Auth, and System sections in Django Admin through Nautobot REST API calls\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.contrib.admin import site as admin_site\nfrom taggit.models import Tag\n\n\n# Override default AdminSite attributes so we can avoid creating and\n# registering our own class\nadmin_site.site_header = \"Nautobot Administration\"\nadmin_site.site_title = \"Nautobot\"\nadmin_site.index_template = \"admin/nautobot_index.html\"\n\n# Unregister the unused stock Tag model provided by django-taggit\nadmin_site.unregister(Tag)\n", "path": "nautobot/core/admin.py"}]} | 957 | 153 |
gh_patches_debug_6459 | rasdani/github-patches | git_diff | holoviz__panel-5490 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
--autoreload does not work in Panel 1.2.2
Run this code with `panel serve --autoreload tmp.py`, then change the end to 2; the app is not reloaded.
``` python
# tmp.py
import panel as pn
pn.widgets.IntSlider(end=1).servable()
```
A git bisect indicates https://github.com/holoviz/panel/pull/5344 is the culprit.
</issue>
<code>
[start of panel/io/callbacks.py]
1 """
2 Defines callbacks to be executed on a thread or by scheduling it
3 on a running bokeh server.
4 """
5 import asyncio
6 import inspect
7 import logging
8 import time
9
10 from functools import partial
11
12 import param
13
14 from ..util import edit_readonly, function_name
15 from .logging import LOG_PERIODIC_END, LOG_PERIODIC_START
16 from .state import curdoc_locked, state
17
18 log = logging.getLogger('panel.callbacks')
19 _periodic_logger = logging.getLogger(f'{__name__}.PeriodicCallback')
20
21 class PeriodicCallback(param.Parameterized):
22 """
23 Periodic encapsulates a periodic callback which will run both
24 in tornado based notebook environments and on bokeh server. By
25 default the callback will run until the stop method is called,
26 but count and timeout values can be set to limit the number of
27 executions or the maximum length of time for which the callback
28 will run. The callback may also be started and stopped by setting
29 the running parameter to True or False respectively.
30 """
31
32 callback = param.Callable(doc="""
33 The callback to execute periodically.""")
34
35 counter = param.Integer(default=0, doc="""
36 Counts the number of executions.""")
37
38 count = param.Integer(default=None, doc="""
39 Number of times the callback will be executed, by default
40 this is unlimited.""")
41
42 log = param.Boolean(default=True, doc="""
43 Whether the periodic callback should log its actions.""")
44
45 period = param.Integer(default=500, doc="""
46 Period in milliseconds at which the callback is executed.""")
47
48 timeout = param.Integer(default=None, doc="""
49 Timeout in milliseconds from the start time at which the callback
50 expires.""")
51
52 running = param.Boolean(default=False, doc="""
53 Toggles whether the periodic callback is currently running.""")
54
55 def __init__(self, **params):
56 self._background = params.pop('background', False)
57 super().__init__(**params)
58 self._start_time = None
59 self._cb = None
60 self._updating = False
61 self._doc = None
62
63 @param.depends('running', watch=True)
64 def _start(self):
65 if not self.running or self._updating:
66 return
67 self.start()
68
69 @param.depends('running', watch=True)
70 def _stop(self):
71 if self.running or self._updating:
72 return
73 self.stop()
74
75 @param.depends('period', watch=True)
76 def _update_period(self):
77 if self._cb:
78 self.stop()
79 self.start()
80
81 def _exec_callback(self, post=False):
82 from .state import set_curdoc
83 try:
84 with set_curdoc(self._doc):
85 if self.running:
86 self.counter += 1
87 if self.counter > self.count:
88 self.stop()
89 cb = self.callback() if self.running else None
90 except Exception:
91 cb = None
92 if post:
93 self._post_callback()
94 return cb
95
96 def _post_callback(self):
97 cbname = function_name(self.callback)
98 if self._doc and self.log:
99 _periodic_logger.info(
100 LOG_PERIODIC_END, id(self._doc), cbname, self.counter
101 )
102 if not self._background:
103 with edit_readonly(state):
104 state._busy_counter -= 1
105 if self.timeout is not None:
106 dt = (time.time() - self._start_time) * 1000
107 if dt > self.timeout:
108 self.stop()
109 if self.counter == self.count:
110 self.stop()
111
112 async def _periodic_callback(self):
113 if not self._background:
114 with edit_readonly(state):
115 state._busy_counter += 1
116 cbname = function_name(self.callback)
117 if self._doc and self.log:
118 _periodic_logger.info(
119 LOG_PERIODIC_START, id(self._doc), cbname, self.counter
120 )
121 is_async = (
122 inspect.isasyncgenfunction(self.callback) or
123 inspect.iscoroutinefunction(self.callback)
124 )
125 if state._thread_pool and not is_async:
126 future = state._thread_pool.submit(self._exec_callback, True)
127 future.add_done_callback(partial(state._handle_future_exception, doc=self._doc))
128 return
129 try:
130 cb = self._exec_callback()
131 if inspect.isawaitable(cb):
132 await cb
133 except Exception:
134 log.exception('Periodic callback failed.')
135 raise
136 finally:
137 self._post_callback()
138
139 async def _async_repeat(self, func):
140 """
141 Run func every interval seconds.
142
143 If func has not finished before *interval*, will run again
144 immediately when the previous iteration finished.
145 """
146 while True:
147 start = time.monotonic()
148 await func()
149 timeout = (self.period/1000.) - (time.monotonic()-start)
150 if timeout > 0:
151 await asyncio.sleep(timeout)
152
153 def _cleanup(self, session_context):
154 self.stop()
155
156 def start(self):
157 """
158 Starts running the periodic callback.
159 """
160 if self._cb is not None:
161 raise RuntimeError('Periodic callback has already started.')
162 if not self.running:
163 try:
164 self._updating = True
165 self.running = True
166 finally:
167 self._updating = False
168 self._start_time = time.time()
169 if state._is_pyodide:
170 self._cb = asyncio.create_task(
171 self._async_repeat(self._periodic_callback)
172 )
173 elif state.curdoc and state.curdoc.session_context:
174 self._doc = state.curdoc
175 if state._unblocked(state.curdoc):
176 self._cb = self._doc.add_periodic_callback(self._periodic_callback, self.period)
177 else:
178 self._doc.add_next_tick_callback(self.start)
179 else:
180 from tornado.ioloop import PeriodicCallback
181 self._cb = PeriodicCallback(lambda: asyncio.create_task(self._periodic_callback()), self.period)
182 self._cb.start()
183
184 def stop(self):
185 """
186 Stops running the periodic callback.
187 """
188 if self.running:
189 try:
190 self._updating = True
191 self.running = False
192 finally:
193 self._updating = False
194 with param.discard_events(self):
195 self.counter = 0
196 self._timeout = None
197 if state._is_pyodide:
198 self._cb.cancel()
199 elif self._doc:
200 if self._doc._session_context:
201 self._doc.callbacks.remove_session_callback(self._cb)
202 else:
203 self._doc.callbacks._session_callbacks.remove(self._cb)
204 elif self._cb:
205 self._cb.stop()
206 self._cb = None
207 doc = self._doc or curdoc_locked()
208 if doc:
209 doc.callbacks.session_destroyed_callbacks = {
210 cb for cb in doc.callbacks.session_destroyed_callbacks
211 if cb is not self._cleanup
212 }
213 self._doc = None
214
[end of panel/io/callbacks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/panel/io/callbacks.py b/panel/io/callbacks.py
--- a/panel/io/callbacks.py
+++ b/panel/io/callbacks.py
@@ -84,7 +84,7 @@
with set_curdoc(self._doc):
if self.running:
self.counter += 1
- if self.counter > self.count:
+ if self.count is not None and self.counter > self.count:
self.stop()
cb = self.callback() if self.running else None
except Exception:
| {"golden_diff": "diff --git a/panel/io/callbacks.py b/panel/io/callbacks.py\n--- a/panel/io/callbacks.py\n+++ b/panel/io/callbacks.py\n@@ -84,7 +84,7 @@\n with set_curdoc(self._doc):\n if self.running:\n self.counter += 1\n- if self.counter > self.count:\n+ if self.count is not None and self.counter > self.count:\n self.stop()\n cb = self.callback() if self.running else None\n except Exception:\n", "issue": "--autoreload does not work in Panel 1.2.2\nRun this code with `panel serve --autoreload tmp.py` and change the end to 2 the \r\n\r\n``` python\r\n# tmp.py\r\nimport panel as pn\r\n\r\npn.widgets.IntSlider(end=1).servable()\r\n```\r\n\r\nA git bisect indicates https://github.com/holoviz/panel/pull/5344 is the culprit. \n", "before_files": [{"content": "\"\"\"\nDefines callbacks to be executed on a thread or by scheduling it\non a running bokeh server.\n\"\"\"\nimport asyncio\nimport inspect\nimport logging\nimport time\n\nfrom functools import partial\n\nimport param\n\nfrom ..util import edit_readonly, function_name\nfrom .logging import LOG_PERIODIC_END, LOG_PERIODIC_START\nfrom .state import curdoc_locked, state\n\nlog = logging.getLogger('panel.callbacks')\n_periodic_logger = logging.getLogger(f'{__name__}.PeriodicCallback')\n\nclass PeriodicCallback(param.Parameterized):\n \"\"\"\n Periodic encapsulates a periodic callback which will run both\n in tornado based notebook environments and on bokeh server. By\n default the callback will run until the stop method is called,\n but count and timeout values can be set to limit the number of\n executions or the maximum length of time for which the callback\n will run. The callback may also be started and stopped by setting\n the running parameter to True or False respectively.\n \"\"\"\n\n callback = param.Callable(doc=\"\"\"\n The callback to execute periodically.\"\"\")\n\n counter = param.Integer(default=0, doc=\"\"\"\n Counts the number of executions.\"\"\")\n\n count = param.Integer(default=None, doc=\"\"\"\n Number of times the callback will be executed, by default\n this is unlimited.\"\"\")\n\n log = param.Boolean(default=True, doc=\"\"\"\n Whether the periodic callback should log its actions.\"\"\")\n\n period = param.Integer(default=500, doc=\"\"\"\n Period in milliseconds at which the callback is executed.\"\"\")\n\n timeout = param.Integer(default=None, doc=\"\"\"\n Timeout in milliseconds from the start time at which the callback\n expires.\"\"\")\n\n running = param.Boolean(default=False, doc=\"\"\"\n Toggles whether the periodic callback is currently running.\"\"\")\n\n def __init__(self, **params):\n self._background = params.pop('background', False)\n super().__init__(**params)\n self._start_time = None\n self._cb = None\n self._updating = False\n self._doc = None\n\n @param.depends('running', watch=True)\n def _start(self):\n if not self.running or self._updating:\n return\n self.start()\n\n @param.depends('running', watch=True)\n def _stop(self):\n if self.running or self._updating:\n return\n self.stop()\n\n @param.depends('period', watch=True)\n def _update_period(self):\n if self._cb:\n self.stop()\n self.start()\n\n def _exec_callback(self, post=False):\n from .state import set_curdoc\n try:\n with set_curdoc(self._doc):\n if self.running:\n self.counter += 1\n if self.counter > self.count:\n self.stop()\n cb = self.callback() if self.running else None\n except Exception:\n cb = None\n if post:\n self._post_callback()\n return cb\n\n def _post_callback(self):\n cbname = function_name(self.callback)\n if self._doc 
and self.log:\n _periodic_logger.info(\n LOG_PERIODIC_END, id(self._doc), cbname, self.counter\n )\n if not self._background:\n with edit_readonly(state):\n state._busy_counter -= 1\n if self.timeout is not None:\n dt = (time.time() - self._start_time) * 1000\n if dt > self.timeout:\n self.stop()\n if self.counter == self.count:\n self.stop()\n\n async def _periodic_callback(self):\n if not self._background:\n with edit_readonly(state):\n state._busy_counter += 1\n cbname = function_name(self.callback)\n if self._doc and self.log:\n _periodic_logger.info(\n LOG_PERIODIC_START, id(self._doc), cbname, self.counter\n )\n is_async = (\n inspect.isasyncgenfunction(self.callback) or\n inspect.iscoroutinefunction(self.callback)\n )\n if state._thread_pool and not is_async:\n future = state._thread_pool.submit(self._exec_callback, True)\n future.add_done_callback(partial(state._handle_future_exception, doc=self._doc))\n return\n try:\n cb = self._exec_callback()\n if inspect.isawaitable(cb):\n await cb\n except Exception:\n log.exception('Periodic callback failed.')\n raise\n finally:\n self._post_callback()\n\n async def _async_repeat(self, func):\n \"\"\"\n Run func every interval seconds.\n\n If func has not finished before *interval*, will run again\n immediately when the previous iteration finished.\n \"\"\"\n while True:\n start = time.monotonic()\n await func()\n timeout = (self.period/1000.) - (time.monotonic()-start)\n if timeout > 0:\n await asyncio.sleep(timeout)\n\n def _cleanup(self, session_context):\n self.stop()\n\n def start(self):\n \"\"\"\n Starts running the periodic callback.\n \"\"\"\n if self._cb is not None:\n raise RuntimeError('Periodic callback has already started.')\n if not self.running:\n try:\n self._updating = True\n self.running = True\n finally:\n self._updating = False\n self._start_time = time.time()\n if state._is_pyodide:\n self._cb = asyncio.create_task(\n self._async_repeat(self._periodic_callback)\n )\n elif state.curdoc and state.curdoc.session_context:\n self._doc = state.curdoc\n if state._unblocked(state.curdoc):\n self._cb = self._doc.add_periodic_callback(self._periodic_callback, self.period)\n else:\n self._doc.add_next_tick_callback(self.start)\n else:\n from tornado.ioloop import PeriodicCallback\n self._cb = PeriodicCallback(lambda: asyncio.create_task(self._periodic_callback()), self.period)\n self._cb.start()\n\n def stop(self):\n \"\"\"\n Stops running the periodic callback.\n \"\"\"\n if self.running:\n try:\n self._updating = True\n self.running = False\n finally:\n self._updating = False\n with param.discard_events(self):\n self.counter = 0\n self._timeout = None\n if state._is_pyodide:\n self._cb.cancel()\n elif self._doc:\n if self._doc._session_context:\n self._doc.callbacks.remove_session_callback(self._cb)\n else:\n self._doc.callbacks._session_callbacks.remove(self._cb)\n elif self._cb:\n self._cb.stop()\n self._cb = None\n doc = self._doc or curdoc_locked()\n if doc:\n doc.callbacks.session_destroyed_callbacks = {\n cb for cb in doc.callbacks.session_destroyed_callbacks\n if cb is not self._cleanup\n }\n self._doc = None\n", "path": "panel/io/callbacks.py"}]} | 2,650 | 113 |
gh_patches_debug_4919 | rasdani/github-patches | git_diff | bokeh__bokeh-1361 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot style minor ticks
Axis objects do not have minor tick properties.
</issue>
<code>
[start of bokeh/models/axes.py]
1 from __future__ import absolute_import
2
3 from ..properties import Int, Float, String, Enum, Instance, Tuple, Either, Include
4 from ..mixins import LineProps, TextProps
5 from ..enums import Location
6
7 from .renderers import GuideRenderer
8 from .tickers import Ticker, BasicTicker, LogTicker, CategoricalTicker, DatetimeTicker
9 from .formatters import TickFormatter, BasicTickFormatter, LogTickFormatter, CategoricalTickFormatter, DatetimeTickFormatter
10
11 class Axis(GuideRenderer):
12 location = Either(Enum('auto'), Enum(Location))
13 bounds = Either(Enum('auto'), Tuple(Float, Float))
14
15 x_range_name = String('default')
16 y_range_name = String('default')
17
18 ticker = Instance(Ticker)
19 formatter = Instance(TickFormatter)
20
21 axis_label = String
22 axis_label_standoff = Int
23 axis_label_props = Include(TextProps)
24
25 major_label_standoff = Int
26 major_label_orientation = Either(Enum("horizontal", "vertical"), Float)
27 major_label_props = Include(TextProps)
28
29 axis_props = Include(LineProps)
30 major_tick_props = Include(LineProps)
31
32 major_tick_in = Int
33 major_tick_out = Int
34
35 class ContinuousAxis(Axis):
36 pass
37
38 class LinearAxis(ContinuousAxis):
39 def __init__(self, ticker=None, formatter=None, **kwargs):
40 if ticker is None:
41 ticker = BasicTicker()
42 if formatter is None:
43 formatter = BasicTickFormatter()
44 super(LinearAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)
45
46 class LogAxis(ContinuousAxis):
47 def __init__(self, ticker=None, formatter=None, **kwargs):
48 if ticker is None:
49 ticker = LogTicker(num_minor_ticks=10)
50 if formatter is None:
51 formatter = LogTickFormatter()
52 super(LogAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)
53
54 class CategoricalAxis(Axis):
55 def __init__(self, ticker=None, formatter=None, **kwargs):
56 if ticker is None:
57 ticker = CategoricalTicker()
58 if formatter is None:
59 formatter = CategoricalTickFormatter()
60 super(CategoricalAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)
61
62 class DatetimeAxis(LinearAxis):
63 axis_label = String("date")
64 scale = String("time")
65 num_labels = Int(8)
66 char_width = Int(10)
67 fill_ratio = Float(0.3)
68
69 def __init__(self, ticker=None, formatter=None, **kwargs):
70 if ticker is None:
71 ticker = DatetimeTicker()
72 if formatter is None:
73 formatter = DatetimeTickFormatter()
74 super(DatetimeAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)
75
[end of bokeh/models/axes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bokeh/models/axes.py b/bokeh/models/axes.py
--- a/bokeh/models/axes.py
+++ b/bokeh/models/axes.py
@@ -27,11 +27,15 @@
major_label_props = Include(TextProps)
axis_props = Include(LineProps)
- major_tick_props = Include(LineProps)
+ major_tick_props = Include(LineProps)
major_tick_in = Int
major_tick_out = Int
+ minor_tick_props = Include(LineProps)
+ minor_tick_in = Int
+ minor_tick_out = Int
+
class ContinuousAxis(Axis):
pass
| {"golden_diff": "diff --git a/bokeh/models/axes.py b/bokeh/models/axes.py\n--- a/bokeh/models/axes.py\n+++ b/bokeh/models/axes.py\n@@ -27,11 +27,15 @@\n major_label_props = Include(TextProps)\n \n axis_props = Include(LineProps)\n- major_tick_props = Include(LineProps)\n \n+ major_tick_props = Include(LineProps)\n major_tick_in = Int\n major_tick_out = Int\n \n+ minor_tick_props = Include(LineProps)\n+ minor_tick_in = Int\n+ minor_tick_out = Int\n+\n class ContinuousAxis(Axis):\n pass\n", "issue": "Cannot style minor ticks\nAxis objects do not have minor tick properties.\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom ..properties import Int, Float, String, Enum, Instance, Tuple, Either, Include\nfrom ..mixins import LineProps, TextProps\nfrom ..enums import Location\n\nfrom .renderers import GuideRenderer\nfrom .tickers import Ticker, BasicTicker, LogTicker, CategoricalTicker, DatetimeTicker\nfrom .formatters import TickFormatter, BasicTickFormatter, LogTickFormatter, CategoricalTickFormatter, DatetimeTickFormatter\n\nclass Axis(GuideRenderer):\n location = Either(Enum('auto'), Enum(Location))\n bounds = Either(Enum('auto'), Tuple(Float, Float))\n\n x_range_name = String('default')\n y_range_name = String('default')\n\n ticker = Instance(Ticker)\n formatter = Instance(TickFormatter)\n\n axis_label = String\n axis_label_standoff = Int\n axis_label_props = Include(TextProps)\n\n major_label_standoff = Int\n major_label_orientation = Either(Enum(\"horizontal\", \"vertical\"), Float)\n major_label_props = Include(TextProps)\n\n axis_props = Include(LineProps)\n major_tick_props = Include(LineProps)\n\n major_tick_in = Int\n major_tick_out = Int\n\nclass ContinuousAxis(Axis):\n pass\n\nclass LinearAxis(ContinuousAxis):\n def __init__(self, ticker=None, formatter=None, **kwargs):\n if ticker is None:\n ticker = BasicTicker()\n if formatter is None:\n formatter = BasicTickFormatter()\n super(LinearAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)\n\nclass LogAxis(ContinuousAxis):\n def __init__(self, ticker=None, formatter=None, **kwargs):\n if ticker is None:\n ticker = LogTicker(num_minor_ticks=10)\n if formatter is None:\n formatter = LogTickFormatter()\n super(LogAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)\n\nclass CategoricalAxis(Axis):\n def __init__(self, ticker=None, formatter=None, **kwargs):\n if ticker is None:\n ticker = CategoricalTicker()\n if formatter is None:\n formatter = CategoricalTickFormatter()\n super(CategoricalAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)\n\nclass DatetimeAxis(LinearAxis):\n axis_label = String(\"date\")\n scale = String(\"time\")\n num_labels = Int(8)\n char_width = Int(10)\n fill_ratio = Float(0.3)\n\n def __init__(self, ticker=None, formatter=None, **kwargs):\n if ticker is None:\n ticker = DatetimeTicker()\n if formatter is None:\n formatter = DatetimeTickFormatter()\n super(DatetimeAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)\n", "path": "bokeh/models/axes.py"}]} | 1,294 | 143 |
gh_patches_debug_14499 | rasdani/github-patches | git_diff | optuna__optuna-3545 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Show warning message by `GridSearchSampler` whose `CategoricalDistribution` contains unsupported typed values, not raising `ValueError`
### Motivation
The grid search sampler enforces a strict type rule on `CategoricalDistribution` values in its search space, which is not consistent with the other samplers, as reported in #3534.
### Description
`CategoricalDistribution` shows a warning message when the `choices` argument contains an unsupported type.
When we run the following code
```python
import optuna
optuna.distributions.CategoricalDistribution([[1], [2]])
```
then we see the following warning message:
```bash
/Users/nzw/Documents/optuna/optuna/distributions.py:501: UserWarning: Choices for a categorical distribution should be a tuple of None, bool, int, float and str for persistent storage but contains [1] which is of type list.
warnings.warn(message)
/Users/nzw/Documents/optuna/optuna/distributions.py:501: UserWarning: Choices for a categorical distribution should be a tuple of None, bool, int, float and str for persistent storage but contains [2] which is of type list.
warnings.warn(message)
```
On the other hand, the grid search sampler raises `ValueError` when we use such an unsupported typed value as an element of the distribution's `choices`.
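For comparison, a minimal sketch of the inconsistency (assuming a current optuna checkout, where `GridSampler._check_value` rejects anything that is not `str`, `int`, `float`, `bool` or `None`):

```python
import optuna

# Only emits a UserWarning about the unsupported list-typed choices:
optuna.distributions.CategoricalDistribution([[1], [2]])

# Raises ValueError from GridSampler._check_value for the same kind of value:
optuna.samplers.GridSampler({"x": [[1], [2]]})
```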
### Alternatives (optional)
_No response_
### Additional context (optional)
_No response_
</issue>
<code>
[start of optuna/samplers/_grid.py]
1 import collections
2 import itertools
3 import random
4 from typing import Any
5 from typing import cast
6 from typing import Dict
7 from typing import List
8 from typing import Mapping
9 from typing import Optional
10 from typing import Sequence
11 from typing import Union
12 import warnings
13
14 from optuna.distributions import BaseDistribution
15 from optuna.logging import get_logger
16 from optuna.samplers import BaseSampler
17 from optuna.study import Study
18 from optuna.trial import FrozenTrial
19 from optuna.trial import TrialState
20
21
22 GridValueType = Union[str, float, int, bool, None]
23 SortableParamValueSequenceType = Union[List[str], List[float], List[int], List[bool]]
24
25
26 _logger = get_logger(__name__)
27
28
29 class GridSampler(BaseSampler):
30 """Sampler using grid search.
31
32 With :class:`~optuna.samplers.GridSampler`, the trials suggest all combinations of parameters
33 in the given search space during the study.
34
35 Example:
36
37 .. testcode::
38
39 import optuna
40
41
42 def objective(trial):
43 x = trial.suggest_float("x", -100, 100)
44 y = trial.suggest_int("y", -100, 100)
45 return x**2 + y**2
46
47
48 search_space = {"x": [-50, 0, 50], "y": [-99, 0, 99]}
49 study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))
50 study.optimize(objective)
51
52 Note:
53
54 :class:`~optuna.samplers.GridSampler` automatically stops the optimization if all
55 combinations in the passed ``search_space`` have already been evaluated, internally
56 invoking the :func:`~optuna.study.Study.stop` method.
57
58 Note:
59
60 :class:`~optuna.samplers.GridSampler` does not take care of a parameter's quantization
61 specified by discrete suggest methods but just samples one of values specified in the
62 search space. E.g., in the following code snippet, either of ``-0.5`` or ``0.5`` is
63 sampled as ``x`` instead of an integer point.
64
65 .. testcode::
66
67 import optuna
68
69
70 def objective(trial):
71 # The following suggest method specifies integer points between -5 and 5.
72 x = trial.suggest_float("x", -5, 5, step=1)
73 return x**2
74
75
76 # Non-int points are specified in the grid.
77 search_space = {"x": [-0.5, 0.5]}
78 study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))
79 study.optimize(objective, n_trials=2)
80
81 Note:
82 A parameter configuration in the grid is not considered finished until its trial is
83 finished. Therefore, during distributed optimization where trials run concurrently,
84 different workers will occasionally suggest the same parameter configuration.
85 The total number of actual trials may therefore exceed the size of the grid.
86
87 Note:
88 The grid is randomly shuffled and the order in which parameter configurations are
89 suggested may vary. This is to reduce duplicate suggestions during distributed
90 optimization.
91
92 Note:
93 All parameters must be specified when using :class:`~optuna.samplers.GridSampler` with
94 :meth:`~optuna.study.Study.enqueue_trial`.
95
96 Args:
97 search_space:
98 A dictionary whose key and value are a parameter name and the corresponding candidates
99 of values, respectively.
100 """
101
102 def __init__(self, search_space: Mapping[str, Sequence[GridValueType]]) -> None:
103
104 for param_name, param_values in search_space.items():
105 for value in param_values:
106 self._check_value(param_name, value)
107
108 self._search_space = collections.OrderedDict()
109 for param_name, param_values in sorted(search_space.items(), key=lambda x: x[0]):
110 param_values = cast(SortableParamValueSequenceType, param_values)
111
112 self._search_space[param_name] = sorted(param_values)
113
114 self._all_grids = list(itertools.product(*self._search_space.values()))
115 self._param_names = sorted(search_space.keys())
116 self._n_min_trials = len(self._all_grids)
117
118 def infer_relative_search_space(
119 self, study: Study, trial: FrozenTrial
120 ) -> Dict[str, BaseDistribution]:
121
122 return {}
123
124 def sample_relative(
125 self, study: Study, trial: FrozenTrial, search_space: Dict[str, BaseDistribution]
126 ) -> Dict[str, Any]:
127 # Instead of returning param values, GridSampler puts the target grid id as a system attr,
128 # and the values are returned from `sample_independent`. This is because the distribution
129 # object is hard to get at the beginning of trial, while we need the access to the object
130 # to validate the sampled value.
131
132 # When the trial is created by RetryFailedTrialCallback or enqueue_trial, we should not
133 # assign a new grid_id.
134 if "grid_id" in trial.system_attrs or "fixed_params" in trial.system_attrs:
135 return {}
136
137 target_grids = self._get_unvisited_grid_ids(study)
138
139 if len(target_grids) == 0:
140 # This case may occur with distributed optimization or trial queue. If there is no
141 # target grid, `GridSampler` evaluates a visited, duplicated point with the current
142 # trial. After that, the optimization stops.
143
144 _logger.warning(
145 "`GridSampler` is re-evaluating a configuration because the grid has been "
146 "exhausted. This may happen due to a timing issue during distributed optimization "
147 "or when re-running optimizations on already finished studies."
148 )
149
150 # One of all grids is randomly picked up in this case.
151 target_grids = list(range(len(self._all_grids)))
152
153 # In distributed optimization, multiple workers may simultaneously pick up the same grid.
154 # To make the conflict less frequent, the grid is chosen randomly.
155 grid_id = random.choice(target_grids)
156
157 study._storage.set_trial_system_attr(trial._trial_id, "search_space", self._search_space)
158 study._storage.set_trial_system_attr(trial._trial_id, "grid_id", grid_id)
159
160 return {}
161
162 def sample_independent(
163 self,
164 study: Study,
165 trial: FrozenTrial,
166 param_name: str,
167 param_distribution: BaseDistribution,
168 ) -> Any:
169
170 if "grid_id" not in trial.system_attrs:
171 message = "All parameters must be specified when using GridSampler with enqueue_trial."
172 raise ValueError(message)
173
174 if param_name not in self._search_space:
175 message = "The parameter name, {}, is not found in the given grid.".format(param_name)
176 raise ValueError(message)
177
178 # TODO(c-bata): Reduce the number of duplicated evaluations on multiple workers.
179 # Current selection logic may evaluate the same parameters multiple times.
180 # See https://gist.github.com/c-bata/f759f64becb24eea2040f4b2e3afce8f for details.
181 grid_id = trial.system_attrs["grid_id"]
182 param_value = self._all_grids[grid_id][self._param_names.index(param_name)]
183 contains = param_distribution._contains(param_distribution.to_internal_repr(param_value))
184 if not contains:
185 warnings.warn(
186 f"The value `{param_value}` is out of range of the parameter `{param_name}`. "
187 f"The value will be used but the actual distribution is: `{param_distribution}`."
188 )
189
190 return param_value
191
192 def after_trial(
193 self,
194 study: Study,
195 trial: FrozenTrial,
196 state: TrialState,
197 values: Optional[Sequence[float]],
198 ) -> None:
199 target_grids = self._get_unvisited_grid_ids(study)
200
201 if len(target_grids) == 0:
202 study.stop()
203 elif len(target_grids) == 1:
204 grid_id = study._storage.get_trial_system_attrs(trial._trial_id)["grid_id"]
205 if grid_id == target_grids[0]:
206 study.stop()
207
208 @staticmethod
209 def _check_value(param_name: str, param_value: Any) -> None:
210
211 if param_value is None or isinstance(param_value, (str, int, float, bool)):
212 return
213
214 raise ValueError(
215 "{} contains a value with the type of {}, which is not supported by "
216 "`GridSampler`. Please make sure a value is `str`, `int`, `float`, `bool`"
217 " or `None`.".format(param_name, type(param_value))
218 )
219
220 def _get_unvisited_grid_ids(self, study: Study) -> List[int]:
221
222 # List up unvisited grids based on already finished ones.
223 visited_grids = []
224 running_grids = []
225
226 # We directly query the storage to get trials here instead of `study.get_trials`,
227 # since some pruners such as `HyperbandPruner` use the study transformed
228 # to filter trials. See https://github.com/optuna/optuna/issues/2327 for details.
229 trials = study._storage.get_all_trials(study._study_id, deepcopy=False)
230
231 for t in trials:
232 if "grid_id" in t.system_attrs and self._same_search_space(
233 t.system_attrs["search_space"]
234 ):
235 if t.state.is_finished():
236 visited_grids.append(t.system_attrs["grid_id"])
237 elif t.state == TrialState.RUNNING:
238 running_grids.append(t.system_attrs["grid_id"])
239
240 unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids) - set(running_grids)
241
242 # If evaluations for all grids have been started, return grids that have not yet finished
243 # because all grids should be evaluated before stopping the optimization.
244 if len(unvisited_grids) == 0:
245 unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids)
246
247 return list(unvisited_grids)
248
249 def _same_search_space(self, search_space: Mapping[str, Sequence[GridValueType]]) -> bool:
250
251 if set(search_space.keys()) != set(self._search_space.keys()):
252 return False
253
254 for param_name in search_space.keys():
255 if len(search_space[param_name]) != len(self._search_space[param_name]):
256 return False
257
258 param_values = cast(SortableParamValueSequenceType, search_space[param_name])
259 for i, param_value in enumerate(sorted(param_values)):
260 if param_value != self._search_space[param_name][i]:
261 return False
262
263 return True
264
[end of optuna/samplers/_grid.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/optuna/samplers/_grid.py b/optuna/samplers/_grid.py
--- a/optuna/samplers/_grid.py
+++ b/optuna/samplers/_grid.py
@@ -211,11 +211,12 @@
if param_value is None or isinstance(param_value, (str, int, float, bool)):
return
- raise ValueError(
+ message = (
"{} contains a value with the type of {}, which is not supported by "
"`GridSampler`. Please make sure a value is `str`, `int`, `float`, `bool`"
- " or `None`.".format(param_name, type(param_value))
+ " or `None` for persistent storage.".format(param_name, type(param_value))
)
+ warnings.warn(message)
def _get_unvisited_grid_ids(self, study: Study) -> List[int]:
| {"golden_diff": "diff --git a/optuna/samplers/_grid.py b/optuna/samplers/_grid.py\n--- a/optuna/samplers/_grid.py\n+++ b/optuna/samplers/_grid.py\n@@ -211,11 +211,12 @@\n if param_value is None or isinstance(param_value, (str, int, float, bool)):\n return\n \n- raise ValueError(\n+ message = (\n \"{} contains a value with the type of {}, which is not supported by \"\n \"`GridSampler`. Please make sure a value is `str`, `int`, `float`, `bool`\"\n- \" or `None`.\".format(param_name, type(param_value))\n+ \" or `None` for persistent storage.\".format(param_name, type(param_value))\n )\n+ warnings.warn(message)\n \n def _get_unvisited_grid_ids(self, study: Study) -> List[int]:\n", "issue": "Show warning message by `GridSearchSampler` whose `CategoricalDistribution` contains unsupported typed values, not raising `ValueError`\n### Motivation\n\nThe grid search sampler has a strict type rule for its search space for `CategoricalDistribution`, which is not consistent with the other samplers as reported in #3534.\r\n\n\n### Description\n\n`CategoriaclDistribution` shows a warning message when the `choices` argument contains an unsupported type.\r\n\r\nWhen we run the following code\r\n```python\r\nimport optuna\r\n\r\noptuna.distributions.CategoricalDistribution([[1], [2]])\r\n```\r\n\r\nthen we see the following warning message:\r\n\r\n```bash\r\n/Users/nzw/Documents/optuna/optuna/distributions.py:501: UserWarning: Choices for a categorical distribution should be a tuple of None, bool, int, float and str for persistent storage but contains [1] which is of type list.\r\n warnings.warn(message)\r\n/Users/nzw/Documents/optuna/optuna/distributions.py:501: UserWarning: Choices for a categorical distribution should be a tuple of None, bool, int, float and str for persistent storage but contains [2] which is of type list.\r\n warnings.warn(message)\r\n```\r\n\r\n\r\nOn the other hand, the grid search sampler raises `ValueError` when we use such unsupported typed value as an element of `choices` of the distribution.\r\n\n\n### Alternatives (optional)\n\n_No response_\n\n### Additional context (optional)\n\n_No response_\n", "before_files": [{"content": "import collections\nimport itertools\nimport random\nfrom typing import Any\nfrom typing import cast\nfrom typing import Dict\nfrom typing import List\nfrom typing import Mapping\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Union\nimport warnings\n\nfrom optuna.distributions import BaseDistribution\nfrom optuna.logging import get_logger\nfrom optuna.samplers import BaseSampler\nfrom optuna.study import Study\nfrom optuna.trial import FrozenTrial\nfrom optuna.trial import TrialState\n\n\nGridValueType = Union[str, float, int, bool, None]\nSortableParamValueSequenceType = Union[List[str], List[float], List[int], List[bool]]\n\n\n_logger = get_logger(__name__)\n\n\nclass GridSampler(BaseSampler):\n \"\"\"Sampler using grid search.\n\n With :class:`~optuna.samplers.GridSampler`, the trials suggest all combinations of parameters\n in the given search space during the study.\n\n Example:\n\n .. 
testcode::\n\n import optuna\n\n\n def objective(trial):\n x = trial.suggest_float(\"x\", -100, 100)\n y = trial.suggest_int(\"y\", -100, 100)\n return x**2 + y**2\n\n\n search_space = {\"x\": [-50, 0, 50], \"y\": [-99, 0, 99]}\n study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))\n study.optimize(objective)\n\n Note:\n\n :class:`~optuna.samplers.GridSampler` automatically stops the optimization if all\n combinations in the passed ``search_space`` have already been evaluated, internally\n invoking the :func:`~optuna.study.Study.stop` method.\n\n Note:\n\n :class:`~optuna.samplers.GridSampler` does not take care of a parameter's quantization\n specified by discrete suggest methods but just samples one of values specified in the\n search space. E.g., in the following code snippet, either of ``-0.5`` or ``0.5`` is\n sampled as ``x`` instead of an integer point.\n\n .. testcode::\n\n import optuna\n\n\n def objective(trial):\n # The following suggest method specifies integer points between -5 and 5.\n x = trial.suggest_float(\"x\", -5, 5, step=1)\n return x**2\n\n\n # Non-int points are specified in the grid.\n search_space = {\"x\": [-0.5, 0.5]}\n study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))\n study.optimize(objective, n_trials=2)\n\n Note:\n A parameter configuration in the grid is not considered finished until its trial is\n finished. Therefore, during distributed optimization where trials run concurrently,\n different workers will occasionally suggest the same parameter configuration.\n The total number of actual trials may therefore exceed the size of the grid.\n\n Note:\n The grid is randomly shuffled and the order in which parameter configurations are\n suggested may vary. This is to reduce duplicate suggestions during distributed\n optimization.\n\n Note:\n All parameters must be specified when using :class:`~optuna.samplers.GridSampler` with\n :meth:`~optuna.study.Study.enqueue_trial`.\n\n Args:\n search_space:\n A dictionary whose key and value are a parameter name and the corresponding candidates\n of values, respectively.\n \"\"\"\n\n def __init__(self, search_space: Mapping[str, Sequence[GridValueType]]) -> None:\n\n for param_name, param_values in search_space.items():\n for value in param_values:\n self._check_value(param_name, value)\n\n self._search_space = collections.OrderedDict()\n for param_name, param_values in sorted(search_space.items(), key=lambda x: x[0]):\n param_values = cast(SortableParamValueSequenceType, param_values)\n\n self._search_space[param_name] = sorted(param_values)\n\n self._all_grids = list(itertools.product(*self._search_space.values()))\n self._param_names = sorted(search_space.keys())\n self._n_min_trials = len(self._all_grids)\n\n def infer_relative_search_space(\n self, study: Study, trial: FrozenTrial\n ) -> Dict[str, BaseDistribution]:\n\n return {}\n\n def sample_relative(\n self, study: Study, trial: FrozenTrial, search_space: Dict[str, BaseDistribution]\n ) -> Dict[str, Any]:\n # Instead of returning param values, GridSampler puts the target grid id as a system attr,\n # and the values are returned from `sample_independent`. 
This is because the distribution\n # object is hard to get at the beginning of trial, while we need the access to the object\n # to validate the sampled value.\n\n # When the trial is created by RetryFailedTrialCallback or enqueue_trial, we should not\n # assign a new grid_id.\n if \"grid_id\" in trial.system_attrs or \"fixed_params\" in trial.system_attrs:\n return {}\n\n target_grids = self._get_unvisited_grid_ids(study)\n\n if len(target_grids) == 0:\n # This case may occur with distributed optimization or trial queue. If there is no\n # target grid, `GridSampler` evaluates a visited, duplicated point with the current\n # trial. After that, the optimization stops.\n\n _logger.warning(\n \"`GridSampler` is re-evaluating a configuration because the grid has been \"\n \"exhausted. This may happen due to a timing issue during distributed optimization \"\n \"or when re-running optimizations on already finished studies.\"\n )\n\n # One of all grids is randomly picked up in this case.\n target_grids = list(range(len(self._all_grids)))\n\n # In distributed optimization, multiple workers may simultaneously pick up the same grid.\n # To make the conflict less frequent, the grid is chosen randomly.\n grid_id = random.choice(target_grids)\n\n study._storage.set_trial_system_attr(trial._trial_id, \"search_space\", self._search_space)\n study._storage.set_trial_system_attr(trial._trial_id, \"grid_id\", grid_id)\n\n return {}\n\n def sample_independent(\n self,\n study: Study,\n trial: FrozenTrial,\n param_name: str,\n param_distribution: BaseDistribution,\n ) -> Any:\n\n if \"grid_id\" not in trial.system_attrs:\n message = \"All parameters must be specified when using GridSampler with enqueue_trial.\"\n raise ValueError(message)\n\n if param_name not in self._search_space:\n message = \"The parameter name, {}, is not found in the given grid.\".format(param_name)\n raise ValueError(message)\n\n # TODO(c-bata): Reduce the number of duplicated evaluations on multiple workers.\n # Current selection logic may evaluate the same parameters multiple times.\n # See https://gist.github.com/c-bata/f759f64becb24eea2040f4b2e3afce8f for details.\n grid_id = trial.system_attrs[\"grid_id\"]\n param_value = self._all_grids[grid_id][self._param_names.index(param_name)]\n contains = param_distribution._contains(param_distribution.to_internal_repr(param_value))\n if not contains:\n warnings.warn(\n f\"The value `{param_value}` is out of range of the parameter `{param_name}`. \"\n f\"The value will be used but the actual distribution is: `{param_distribution}`.\"\n )\n\n return param_value\n\n def after_trial(\n self,\n study: Study,\n trial: FrozenTrial,\n state: TrialState,\n values: Optional[Sequence[float]],\n ) -> None:\n target_grids = self._get_unvisited_grid_ids(study)\n\n if len(target_grids) == 0:\n study.stop()\n elif len(target_grids) == 1:\n grid_id = study._storage.get_trial_system_attrs(trial._trial_id)[\"grid_id\"]\n if grid_id == target_grids[0]:\n study.stop()\n\n @staticmethod\n def _check_value(param_name: str, param_value: Any) -> None:\n\n if param_value is None or isinstance(param_value, (str, int, float, bool)):\n return\n\n raise ValueError(\n \"{} contains a value with the type of {}, which is not supported by \"\n \"`GridSampler`. 
Please make sure a value is `str`, `int`, `float`, `bool`\"\n \" or `None`.\".format(param_name, type(param_value))\n )\n\n def _get_unvisited_grid_ids(self, study: Study) -> List[int]:\n\n # List up unvisited grids based on already finished ones.\n visited_grids = []\n running_grids = []\n\n # We directly query the storage to get trials here instead of `study.get_trials`,\n # since some pruners such as `HyperbandPruner` use the study transformed\n # to filter trials. See https://github.com/optuna/optuna/issues/2327 for details.\n trials = study._storage.get_all_trials(study._study_id, deepcopy=False)\n\n for t in trials:\n if \"grid_id\" in t.system_attrs and self._same_search_space(\n t.system_attrs[\"search_space\"]\n ):\n if t.state.is_finished():\n visited_grids.append(t.system_attrs[\"grid_id\"])\n elif t.state == TrialState.RUNNING:\n running_grids.append(t.system_attrs[\"grid_id\"])\n\n unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids) - set(running_grids)\n\n # If evaluations for all grids have been started, return grids that have not yet finished\n # because all grids should be evaluated before stopping the optimization.\n if len(unvisited_grids) == 0:\n unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids)\n\n return list(unvisited_grids)\n\n def _same_search_space(self, search_space: Mapping[str, Sequence[GridValueType]]) -> bool:\n\n if set(search_space.keys()) != set(self._search_space.keys()):\n return False\n\n for param_name in search_space.keys():\n if len(search_space[param_name]) != len(self._search_space[param_name]):\n return False\n\n param_values = cast(SortableParamValueSequenceType, search_space[param_name])\n for i, param_value in enumerate(sorted(param_values)):\n if param_value != self._search_space[param_name][i]:\n return False\n\n return True\n", "path": "optuna/samplers/_grid.py"}]} | 3,824 | 195 |
gh_patches_debug_14570 | rasdani/github-patches | git_diff | freedomofpress__securedrop-2491 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
localization: various problems
# Bug
## Description
This issue is to collect the various localization problems found before the 0.4.4 release and after the **string freeze**
They should be fixed in a pull request right after the 0.4.4 release.
</issue>
<code>
[start of securedrop/source_app/forms.py]
1 from flask_babel import gettext
2 from flask_wtf import FlaskForm
3 from wtforms import PasswordField
4 from wtforms.validators import InputRequired, Regexp, Length
5
6 from db import Source
7
8
9 class LoginForm(FlaskForm):
10 codename = PasswordField('codename', validators=[
11 InputRequired(message=gettext('This field is required.')),
12 Length(1, Source.MAX_CODENAME_LEN,
13 message=gettext('Field must be between 1 and '
14 '{max_codename_len} characters long. '.format(
15 max_codename_len=Source.MAX_CODENAME_LEN))),
16 # Make sure to allow dashes since some words in the wordlist have them
17 Regexp(r'[\sA-Za-z0-9-]+$', message=gettext('Invalid input.'))
18 ])
19
[end of securedrop/source_app/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/securedrop/source_app/forms.py b/securedrop/source_app/forms.py
--- a/securedrop/source_app/forms.py
+++ b/securedrop/source_app/forms.py
@@ -11,7 +11,7 @@
InputRequired(message=gettext('This field is required.')),
Length(1, Source.MAX_CODENAME_LEN,
message=gettext('Field must be between 1 and '
- '{max_codename_len} characters long. '.format(
+ '{max_codename_len} characters long.'.format(
max_codename_len=Source.MAX_CODENAME_LEN))),
# Make sure to allow dashes since some words in the wordlist have them
Regexp(r'[\sA-Za-z0-9-]+$', message=gettext('Invalid input.'))
| {"golden_diff": "diff --git a/securedrop/source_app/forms.py b/securedrop/source_app/forms.py\n--- a/securedrop/source_app/forms.py\n+++ b/securedrop/source_app/forms.py\n@@ -11,7 +11,7 @@\n InputRequired(message=gettext('This field is required.')),\n Length(1, Source.MAX_CODENAME_LEN,\n message=gettext('Field must be between 1 and '\n- '{max_codename_len} characters long. '.format(\n+ '{max_codename_len} characters long.'.format(\n max_codename_len=Source.MAX_CODENAME_LEN))),\n # Make sure to allow dashes since some words in the wordlist have them\n Regexp(r'[\\sA-Za-z0-9-]+$', message=gettext('Invalid input.'))\n", "issue": "localization: various problems\n# Bug\r\n\r\n## Description\r\n\r\nThis issue is to collect the various localization problems found before the 0.4.4 release and after the **string freeze**\r\n\r\nThey should be fixed in a pull request right after the 0.4.4 release.\n", "before_files": [{"content": "from flask_babel import gettext\nfrom flask_wtf import FlaskForm\nfrom wtforms import PasswordField\nfrom wtforms.validators import InputRequired, Regexp, Length\n\nfrom db import Source\n\n\nclass LoginForm(FlaskForm):\n codename = PasswordField('codename', validators=[\n InputRequired(message=gettext('This field is required.')),\n Length(1, Source.MAX_CODENAME_LEN,\n message=gettext('Field must be between 1 and '\n '{max_codename_len} characters long. '.format(\n max_codename_len=Source.MAX_CODENAME_LEN))),\n # Make sure to allow dashes since some words in the wordlist have them\n Regexp(r'[\\sA-Za-z0-9-]+$', message=gettext('Invalid input.'))\n ])\n", "path": "securedrop/source_app/forms.py"}]} | 790 | 169 |
gh_patches_debug_64451 | rasdani/github-patches | git_diff | bokeh__bokeh-9477 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Non-daemon worker thread prevents gunicorn from shutting down cleanly.
#### ALL software version info (bokeh, python, notebook, OS, browser, any other relevant packages)
bokeh HEAD e605297
gunicorn (version 20.0.4)
Python 3.7.4
macOS 10.14.6
#### Description of expected behavior and the observed behavior
I am learning about embedding Bokeh in a Flask project and tried the sample script flask_gunicorn_embed.py from the macOS terminal. After viewing the working web page in Safari, I then pressed Ctrl-C in the terminal to stop the gunicorn server. The expected behaviour was a clean shutdown of gunicorn, but instead it hangs.
Marking the bk_worker thread as a daemon before starting it resolves the hang.
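For reference, a minimal sketch of that workaround, replacing the final `Thread(target=bk_worker).start()` line in `flask_gunicorn_embed.py` (`Thread` and `bk_worker` are already defined in the script):

```python
# Daemon thread: it no longer blocks interpreter shutdown, so the gunicorn
# worker can exit cleanly on SIGINT instead of hanging on the Bokeh IO loop.
t = Thread(target=bk_worker)
t.daemon = True
t.start()
```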
#### Stack traceback and/or browser JavaScript console output
greent7@avocado:~/development/bokeh/examples/howto/server_embed$ BOKEH_ALLOW_WS_ORIGIN=127.0.0.1:8000 gunicorn -w 4 flask_gunicorn_embed:app
[2019-11-29 01:06:31 -0700] [53812] [INFO] Starting gunicorn 20.0.4
[2019-11-29 01:06:31 -0700] [53812] [INFO] Listening at: http://127.0.0.1:8000 (53812)
[2019-11-29 01:06:31 -0700] [53812] [INFO] Using worker: sync
[2019-11-29 01:06:31 -0700] [53815] [INFO] Booting worker with pid: 53815
[2019-11-29 01:06:32 -0700] [53816] [INFO] Booting worker with pid: 53816
[2019-11-29 01:06:32 -0700] [53817] [INFO] Booting worker with pid: 53817
[2019-11-29 01:06:32 -0700] [53818] [INFO] Booting worker with pid: 53818
^C[2019-11-29 01:06:33 -0700] [53812] [INFO] Handling signal: int
[2019-11-29 01:06:33 -0700] [53818] [INFO] Worker exiting (pid: 53818)
[2019-11-29 01:06:33 -0700] [53815] [INFO] Worker exiting (pid: 53815)
[2019-11-29 01:06:33 -0700] [53817] [INFO] Worker exiting (pid: 53817)
[2019-11-29 01:06:33 -0700] [53816] [INFO] Worker exiting (pid: 53816)
If I hit Ctrl-C again, it continues and exits noisily:
^CException ignored in: <module 'threading' from '/usr/local/Cellar/python/3.7.4/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py'>
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.4/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py", line 1308, in _shutdown
lock.acquire()
File "/usr/local/lib/python3.7/site-packages/gunicorn/workers/base.py", line 196, in handle_quit
sys.exit(0)
SystemExit: 0
[2019-11-29 01:06:56 -0700] [53812] [INFO] Shutting down: Master
[2019-11-29 01:06:56 -0700] [53812] [INFO] Shutting down: Master
</issue>
<code>
[start of examples/howto/server_embed/flask_gunicorn_embed.py]
1 try:
2 import asyncio
3 except ImportError:
4 raise RuntimeError("This example requries Python3 / asyncio")
5
6 from threading import Thread
7
8 from flask import Flask, render_template
9 from tornado.httpserver import HTTPServer
10 from tornado.ioloop import IOLoop
11
12 from bokeh.application import Application
13 from bokeh.application.handlers import FunctionHandler
14 from bokeh.embed import server_document
15 from bokeh.layouts import column
16 from bokeh.models import ColumnDataSource, Slider
17 from bokeh.plotting import figure
18 from bokeh.sampledata.sea_surface_temperature import sea_surface_temperature
19 from bokeh.server.server import BaseServer
20 from bokeh.server.tornado import BokehTornado
21 from bokeh.server.util import bind_sockets
22 from bokeh.themes import Theme
23
24 if __name__ == '__main__':
25 print('This script is intended to be run with gunicorn. e.g.')
26 print()
27 print(' gunicorn -w 4 flask_gunicorn_embed:app')
28 print()
29 print('will start the app on four processes')
30 import sys
31 sys.exit()
32
33
34 app = Flask(__name__)
35
36 def bkapp(doc):
37 df = sea_surface_temperature.copy()
38 source = ColumnDataSource(data=df)
39
40 plot = figure(x_axis_type='datetime', y_range=(0, 25), y_axis_label='Temperature (Celsius)',
41 title="Sea Surface Temperature at 43.18, -70.43")
42 plot.line('time', 'temperature', source=source)
43
44 def callback(attr, old, new):
45 if new == 0:
46 data = df
47 else:
48 data = df.rolling('{0}D'.format(new)).mean()
49 source.data = ColumnDataSource.from_df(data)
50
51 slider = Slider(start=0, end=30, value=0, step=1, title="Smoothing by N Days")
52 slider.on_change('value', callback)
53
54 doc.add_root(column(slider, plot))
55
56 doc.theme = Theme(filename="theme.yaml")
57
58 # can't use shortcuts here, since we are passing to low level BokehTornado
59 bkapp = Application(FunctionHandler(bkapp))
60
61 # This is so that if this app is run using something like "gunicorn -w 4" then
62 # each process will listen on its own port
63 sockets, port = bind_sockets("localhost", 0)
64
65 @app.route('/', methods=['GET'])
66 def bkapp_page():
67 script = server_document('http://localhost:%d/bkapp' % port)
68 return render_template("embed.html", script=script, template="Flask")
69
70 def bk_worker():
71 asyncio.set_event_loop(asyncio.new_event_loop())
72
73 bokeh_tornado = BokehTornado({'/bkapp': bkapp}, extra_websocket_origins=["localhost:8000"])
74 bokeh_http = HTTPServer(bokeh_tornado)
75 bokeh_http.add_sockets(sockets)
76
77 server = BaseServer(IOLoop.current(), bokeh_tornado, bokeh_http)
78 server.start()
79 server.io_loop.start()
80
81 Thread(target=bk_worker).start()
82
[end of examples/howto/server_embed/flask_gunicorn_embed.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/howto/server_embed/flask_gunicorn_embed.py b/examples/howto/server_embed/flask_gunicorn_embed.py
--- a/examples/howto/server_embed/flask_gunicorn_embed.py
+++ b/examples/howto/server_embed/flask_gunicorn_embed.py
@@ -78,4 +78,6 @@
server.start()
server.io_loop.start()
-Thread(target=bk_worker).start()
+t = Thread(target=bk_worker)
+t.daemon = True
+t.start()
| {"golden_diff": "diff --git a/examples/howto/server_embed/flask_gunicorn_embed.py b/examples/howto/server_embed/flask_gunicorn_embed.py\n--- a/examples/howto/server_embed/flask_gunicorn_embed.py\n+++ b/examples/howto/server_embed/flask_gunicorn_embed.py\n@@ -78,4 +78,6 @@\n server.start()\n server.io_loop.start()\n \n-Thread(target=bk_worker).start()\n+t = Thread(target=bk_worker)\n+t.daemon = True\n+t.start()\n", "issue": "[BUG] Non-daemon worker thread prevents gunicorn from shutting down cleanly.\n#### ALL software version info (bokeh, python, notebook, OS, browser, any other relevant packages)\r\nbokeh HEAD e605297\r\ngunicorn (version 20.0.4)\r\nPython 3.7.4\r\nmacOS 10.14.6\r\n\r\n#### Description of expected behavior and the observed behavior\r\nI am learning about embedding Bokeh in a Flask project and tried the sample script flask_gunicorn_embed.py from the macOS terminal. After viewing the working web page in Safari, I then pressed Ctrl-C in the terminal to stop the gunicorn server. The expected behaviour was a clean shutdown of gunicorn, but instead it hangs.\r\n\r\nMarking the bk_worker thread as a daemon before starting it resolves the hang.\r\n\r\n#### Stack traceback and/or browser JavaScript console output\r\ngreent7@avocado:~/development/bokeh/examples/howto/server_embed$ BOKEH_ALLOW_WS_ORIGIN=127.0.0.1:8000 gunicorn -w 4 flask_gunicorn_embed:app\r\n[2019-11-29 01:06:31 -0700] [53812] [INFO] Starting gunicorn 20.0.4\r\n[2019-11-29 01:06:31 -0700] [53812] [INFO] Listening at: http://127.0.0.1:8000 (53812)\r\n[2019-11-29 01:06:31 -0700] [53812] [INFO] Using worker: sync\r\n[2019-11-29 01:06:31 -0700] [53815] [INFO] Booting worker with pid: 53815\r\n[2019-11-29 01:06:32 -0700] [53816] [INFO] Booting worker with pid: 53816\r\n[2019-11-29 01:06:32 -0700] [53817] [INFO] Booting worker with pid: 53817\r\n[2019-11-29 01:06:32 -0700] [53818] [INFO] Booting worker with pid: 53818\r\n^C[2019-11-29 01:06:33 -0700] [53812] [INFO] Handling signal: int\r\n[2019-11-29 01:06:33 -0700] [53818] [INFO] Worker exiting (pid: 53818)\r\n[2019-11-29 01:06:33 -0700] [53815] [INFO] Worker exiting (pid: 53815)\r\n[2019-11-29 01:06:33 -0700] [53817] [INFO] Worker exiting (pid: 53817)\r\n[2019-11-29 01:06:33 -0700] [53816] [INFO] Worker exiting (pid: 53816)\r\n\r\nIf I hit Ctrl-C again, it continues and exits noisily:\r\n\r\n^CException ignored in: <module 'threading' from '/usr/local/Cellar/python/3.7.4/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py'>\r\nTraceback (most recent call last):\r\n File \"/usr/local/Cellar/python/3.7.4/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py\", line 1308, in _shutdown\r\n lock.acquire()\r\n File \"/usr/local/lib/python3.7/site-packages/gunicorn/workers/base.py\", line 196, in handle_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\n[2019-11-29 01:06:56 -0700] [53812] [INFO] Shutting down: Master\r\n\n[BUG] Non-daemon worker thread prevents gunicorn from shutting down cleanly.\n#### ALL software version info (bokeh, python, notebook, OS, browser, any other relevant packages)\r\nbokeh HEAD e605297\r\ngunicorn (version 20.0.4)\r\nPython 3.7.4\r\nmacOS 10.14.6\r\n\r\n#### Description of expected behavior and the observed behavior\r\nI am learning about embedding Bokeh in a Flask project and tried the sample script flask_gunicorn_embed.py from the macOS terminal. After viewing the working web page in Safari, I then pressed Ctrl-C in the terminal to stop the gunicorn server. 
The expected behaviour was a clean shutdown of gunicorn, but instead it hangs.\r\n\r\nMarking the bk_worker thread as a daemon before starting it resolves the hang.\r\n\r\n#### Stack traceback and/or browser JavaScript console output\r\ngreent7@avocado:~/development/bokeh/examples/howto/server_embed$ BOKEH_ALLOW_WS_ORIGIN=127.0.0.1:8000 gunicorn -w 4 flask_gunicorn_embed:app\r\n[2019-11-29 01:06:31 -0700] [53812] [INFO] Starting gunicorn 20.0.4\r\n[2019-11-29 01:06:31 -0700] [53812] [INFO] Listening at: http://127.0.0.1:8000 (53812)\r\n[2019-11-29 01:06:31 -0700] [53812] [INFO] Using worker: sync\r\n[2019-11-29 01:06:31 -0700] [53815] [INFO] Booting worker with pid: 53815\r\n[2019-11-29 01:06:32 -0700] [53816] [INFO] Booting worker with pid: 53816\r\n[2019-11-29 01:06:32 -0700] [53817] [INFO] Booting worker with pid: 53817\r\n[2019-11-29 01:06:32 -0700] [53818] [INFO] Booting worker with pid: 53818\r\n^C[2019-11-29 01:06:33 -0700] [53812] [INFO] Handling signal: int\r\n[2019-11-29 01:06:33 -0700] [53818] [INFO] Worker exiting (pid: 53818)\r\n[2019-11-29 01:06:33 -0700] [53815] [INFO] Worker exiting (pid: 53815)\r\n[2019-11-29 01:06:33 -0700] [53817] [INFO] Worker exiting (pid: 53817)\r\n[2019-11-29 01:06:33 -0700] [53816] [INFO] Worker exiting (pid: 53816)\r\n\r\nIf I hit Ctrl-C again, it continues and exits noisily:\r\n\r\n^CException ignored in: <module 'threading' from '/usr/local/Cellar/python/3.7.4/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py'>\r\nTraceback (most recent call last):\r\n File \"/usr/local/Cellar/python/3.7.4/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py\", line 1308, in _shutdown\r\n lock.acquire()\r\n File \"/usr/local/lib/python3.7/site-packages/gunicorn/workers/base.py\", line 196, in handle_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\n[2019-11-29 01:06:56 -0700] [53812] [INFO] Shutting down: Master\r\n\n", "before_files": [{"content": "try:\n import asyncio\nexcept ImportError:\n raise RuntimeError(\"This example requries Python3 / asyncio\")\n\nfrom threading import Thread\n\nfrom flask import Flask, render_template\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\n\nfrom bokeh.application import Application\nfrom bokeh.application.handlers import FunctionHandler\nfrom bokeh.embed import server_document\nfrom bokeh.layouts import column\nfrom bokeh.models import ColumnDataSource, Slider\nfrom bokeh.plotting import figure\nfrom bokeh.sampledata.sea_surface_temperature import sea_surface_temperature\nfrom bokeh.server.server import BaseServer\nfrom bokeh.server.tornado import BokehTornado\nfrom bokeh.server.util import bind_sockets\nfrom bokeh.themes import Theme\n\nif __name__ == '__main__':\n print('This script is intended to be run with gunicorn. 
e.g.')\n print()\n print(' gunicorn -w 4 flask_gunicorn_embed:app')\n print()\n print('will start the app on four processes')\n import sys\n sys.exit()\n\n\napp = Flask(__name__)\n\ndef bkapp(doc):\n df = sea_surface_temperature.copy()\n source = ColumnDataSource(data=df)\n\n plot = figure(x_axis_type='datetime', y_range=(0, 25), y_axis_label='Temperature (Celsius)',\n title=\"Sea Surface Temperature at 43.18, -70.43\")\n plot.line('time', 'temperature', source=source)\n\n def callback(attr, old, new):\n if new == 0:\n data = df\n else:\n data = df.rolling('{0}D'.format(new)).mean()\n source.data = ColumnDataSource.from_df(data)\n\n slider = Slider(start=0, end=30, value=0, step=1, title=\"Smoothing by N Days\")\n slider.on_change('value', callback)\n\n doc.add_root(column(slider, plot))\n\n doc.theme = Theme(filename=\"theme.yaml\")\n\n# can't use shortcuts here, since we are passing to low level BokehTornado\nbkapp = Application(FunctionHandler(bkapp))\n\n# This is so that if this app is run using something like \"gunicorn -w 4\" then\n# each process will listen on its own port\nsockets, port = bind_sockets(\"localhost\", 0)\n\[email protected]('/', methods=['GET'])\ndef bkapp_page():\n script = server_document('http://localhost:%d/bkapp' % port)\n return render_template(\"embed.html\", script=script, template=\"Flask\")\n\ndef bk_worker():\n asyncio.set_event_loop(asyncio.new_event_loop())\n\n bokeh_tornado = BokehTornado({'/bkapp': bkapp}, extra_websocket_origins=[\"localhost:8000\"])\n bokeh_http = HTTPServer(bokeh_tornado)\n bokeh_http.add_sockets(sockets)\n\n server = BaseServer(IOLoop.current(), bokeh_tornado, bokeh_http)\n server.start()\n server.io_loop.start()\n\nThread(target=bk_worker).start()\n", "path": "examples/howto/server_embed/flask_gunicorn_embed.py"}]} | 3,408 | 105 |
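The golden diff in the record above resolves the gunicorn shutdown hang by marking the Bokeh worker thread as a daemon before starting it. A minimal, self-contained sketch of that pattern outside of Bokeh/Flask (the worker body below is a made-up stand-in for the blocking server loop):

```
import time
from threading import Thread

def background_worker() -> None:
    # Stand-in for the blocking Bokeh server loop from the record above.
    while True:
        time.sleep(1)

# daemon=True is equivalent to setting t.daemon = True before start():
# the interpreter will not wait for this thread at shutdown, so Ctrl-C /
# SIGINT lets the hosting process (e.g. a gunicorn worker) exit cleanly.
worker = Thread(target=background_worker, daemon=True)
worker.start()

print("main thread exits without joining the daemon worker")
```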
gh_patches_debug_43357 | rasdani/github-patches | git_diff | streamlink__streamlink-4840 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plugins.rtve: ZTNR.translate() runs endlessly
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest stable release
### Description
RTVE plugin is not resolving any streams and stuck after message "Found matching plugin rtve for URL". Upon debugging in Python the following while block seems to cause an endless loop: https://github.com/streamlink/streamlink/blob/master/src/streamlink/plugins/rtve.py#L111
Thanks for the good work!
Cheers.
### Debug log
```text
bin\streamlink.exe -l debug https://rtve.es/play/videos/directo/canales-lineales/24h
[cli][debug] OS: Windows 10
[cli][debug] Python: 3.10.7
[cli][debug] Streamlink: 5.0.0
[cli][debug] Dependencies:
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.9.1
[cli][debug] pycountry: 22.3.5
[cli][debug] pycryptodome: 3.15.0
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.28.1
[cli][debug] websocket-client: 1.4.1
[cli][debug] Arguments:
[cli][debug] url=https://rtve.es/play/videos/directo/canales-lineales/24h
[cli][debug] --loglevel=debug
[cli][info] Found matching plugin rtve for URL https://rtve.es/play/videos/directo/canales-lineales/24h
```
</issue>
<code>
[start of src/streamlink/plugins/rtve.py]
1 """
2 $description Live TV channels and video on-demand service from RTVE, a Spanish public, state-owned broadcaster.
3 $url rtve.es
4 $type live, vod
5 $region Spain
6 """
7
8 import logging
9 import re
10 from base64 import b64decode
11 from io import BytesIO
12 from typing import Iterator, Sequence, Tuple
13 from urllib.parse import urlparse
14
15 from streamlink.plugin import Plugin, pluginargument, pluginmatcher
16 from streamlink.plugin.api import validate
17 from streamlink.stream.ffmpegmux import MuxedStream
18 from streamlink.stream.hls import HLSStream
19 from streamlink.stream.http import HTTPStream
20 from streamlink.utils.url import update_scheme
21
22 log = logging.getLogger(__name__)
23
24
25 class Base64Reader:
26 def __init__(self, data: str):
27 stream = BytesIO(b64decode(data))
28
29 def _iterate():
30 while True:
31 chunk = stream.read(1)
32 if len(chunk) == 0: # pragma: no cover
33 return
34 yield ord(chunk)
35
36 self._iterator: Iterator[int] = _iterate()
37
38 def read(self, num: int) -> Sequence[int]:
39 res = []
40 for _ in range(num):
41 item = next(self._iterator, None)
42 if item is None: # pragma: no cover
43 break
44 res.append(item)
45 return res
46
47 def skip(self, num: int) -> None:
48 self.read(num)
49
50 def read_chars(self, num: int) -> str:
51 return "".join(chr(item) for item in self.read(num))
52
53 def read_int(self) -> int:
54 a, b, c, d = self.read(4)
55 return a << 24 | b << 16 | c << 8 | d
56
57 def read_chunk(self) -> Tuple[str, Sequence[int]]:
58 size = self.read_int()
59 chunktype = self.read_chars(4)
60 chunkdata = self.read(size)
61 if len(chunkdata) != size: # pragma: no cover
62 raise ValueError("Invalid chunk length")
63 self.skip(4)
64 return chunktype, chunkdata
65
66
67 class ZTNR:
68 @staticmethod
69 def _get_alphabet(text: str) -> str:
70 res = []
71 j = 0
72 k = 0
73 for char in text:
74 if k > 0:
75 k -= 1
76 else:
77 res.append(char)
78 j = (j + 1) % 4
79 k = j
80 return "".join(res)
81
82 @staticmethod
83 def _get_url(text: str, alphabet: str) -> str:
84 res = []
85 j = 0
86 n = 0
87 k = 3
88 cont = 0
89 for char in text:
90 if j == 0:
91 n = int(char) * 10
92 j = 1
93 elif k > 0:
94 k -= 1
95 else:
96 res.append(alphabet[n + int(char)])
97 j = 0
98 k = cont % 4
99 cont += 1
100 return "".join(res)
101
102 @classmethod
103 def _get_source(cls, alphabet: str, data: str) -> str:
104 return cls._get_url(data, cls._get_alphabet(alphabet))
105
106 @classmethod
107 def translate(cls, data: str) -> Iterator[Tuple[str, str]]:
108 reader = Base64Reader(data.replace("\n", ""))
109 reader.skip(8)
110 chunk_type, chunk_data = reader.read_chunk()
111 while chunk_type != "IEND":
112 if chunk_type == "tEXt":
113 content = "".join(chr(item) for item in chunk_data if item > 0)
114 if "#" not in content or "%%" not in content: # pragma: no cover
115 continue
116 alphabet, content = content.split("#", 1)
117 quality, content = content.split("%%", 1)
118 yield quality, cls._get_source(alphabet, content)
119 chunk_type, chunk_data = reader.read_chunk()
120
121
122 @pluginmatcher(re.compile(
123 r"https?://(?:www\.)?rtve\.es/play/videos/.+"
124 ))
125 @pluginargument(
126 "mux-subtitles",
127 is_global=True,
128 )
129 class Rtve(Plugin):
130 URL_M3U8 = "https://ztnr.rtve.es/ztnr/{id}.m3u8"
131 URL_VIDEOS = "https://ztnr.rtve.es/ztnr/movil/thumbnail/rtveplayw/videos/{id}.png?q=v2"
132 URL_SUBTITLES = "https://www.rtve.es/api/videos/{id}/subtitulos.json"
133
134 def _get_streams(self):
135 self.id = self.session.http.get(self.url, schema=validate.Schema(
136 re.compile(r"\bdata-setup='({.+?})'", re.DOTALL),
137 validate.none_or_all(
138 validate.get(1),
139 validate.parse_json(),
140 {
141 "idAsset": validate.any(int, validate.all(str, validate.transform(int))),
142 },
143 validate.get("idAsset"),
144 ),
145 ))
146 if not self.id:
147 return
148
149 # check obfuscated stream URLs via self.URL_VIDEOS and ZTNR.translate() first
150 # self.URL_M3U8 appears to be valid for all streams, but doesn't provide any content in same cases
151 urls = self.session.http.get(
152 self.URL_VIDEOS.format(id=self.id),
153 schema=validate.Schema(
154 validate.transform(ZTNR.translate),
155 validate.transform(list),
156 [(str, validate.url())],
157 ),
158 )
159
160 # then fall back to self.URL_M3U8
161 if not urls:
162 url = self.URL_M3U8.format(id=self.id)
163 else:
164 url = next((url for _, url in urls if urlparse(url).path.endswith(".m3u8")), None)
165 if not url:
166 url = next((url for _, url in urls if urlparse(url).path.endswith(".mp4")), None)
167 if url:
168 yield "vod", HTTPStream(self.session, url)
169 return
170
171 streams = HLSStream.parse_variant_playlist(self.session, url).items()
172
173 if self.options.get("mux-subtitles"):
174 subs = self.session.http.get(
175 self.URL_SUBTITLES.format(id=self.id),
176 schema=validate.Schema(
177 validate.parse_json(),
178 {
179 "page": {
180 "items": [{
181 "lang": str,
182 "src": validate.url(),
183 }],
184 },
185 },
186 validate.get(("page", "items")),
187 ),
188 )
189 if subs:
190 subtitles = {
191 s["lang"]: HTTPStream(self.session, update_scheme("https://", s["src"], force=True))
192 for s in subs
193 }
194 for quality, stream in streams:
195 yield quality, MuxedStream(self.session, stream, subtitles=subtitles)
196 return
197
198 yield from streams
199
200
201 __plugin__ = Rtve
202
[end of src/streamlink/plugins/rtve.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/rtve.py b/src/streamlink/plugins/rtve.py
--- a/src/streamlink/plugins/rtve.py
+++ b/src/streamlink/plugins/rtve.py
@@ -12,7 +12,7 @@
from typing import Iterator, Sequence, Tuple
from urllib.parse import urlparse
-from streamlink.plugin import Plugin, pluginargument, pluginmatcher
+from streamlink.plugin import Plugin, PluginError, pluginargument, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.ffmpegmux import MuxedStream
from streamlink.stream.hls import HLSStream
@@ -29,7 +29,7 @@
def _iterate():
while True:
chunk = stream.read(1)
- if len(chunk) == 0: # pragma: no cover
+ if len(chunk) == 0:
return
yield ord(chunk)
@@ -39,7 +39,7 @@
res = []
for _ in range(num):
item = next(self._iterator, None)
- if item is None: # pragma: no cover
+ if item is None:
break
res.append(item)
return res
@@ -63,6 +63,14 @@
self.skip(4)
return chunktype, chunkdata
+ def __iter__(self):
+ self.skip(8)
+ while True:
+ try:
+ yield self.read_chunk()
+ except ValueError:
+ return
+
class ZTNR:
@staticmethod
@@ -106,17 +114,16 @@
@classmethod
def translate(cls, data: str) -> Iterator[Tuple[str, str]]:
reader = Base64Reader(data.replace("\n", ""))
- reader.skip(8)
- chunk_type, chunk_data = reader.read_chunk()
- while chunk_type != "IEND":
+ for chunk_type, chunk_data in reader:
+ if chunk_type == "IEND":
+ break
if chunk_type == "tEXt":
content = "".join(chr(item) for item in chunk_data if item > 0)
- if "#" not in content or "%%" not in content: # pragma: no cover
+ if "#" not in content or "%%" not in content:
continue
alphabet, content = content.split("#", 1)
quality, content = content.split("%%", 1)
yield quality, cls._get_source(alphabet, content)
- chunk_type, chunk_data = reader.read_chunk()
@pluginmatcher(re.compile(
@@ -147,18 +154,19 @@
return
# check obfuscated stream URLs via self.URL_VIDEOS and ZTNR.translate() first
- # self.URL_M3U8 appears to be valid for all streams, but doesn't provide any content in same cases
- urls = self.session.http.get(
- self.URL_VIDEOS.format(id=self.id),
- schema=validate.Schema(
- validate.transform(ZTNR.translate),
- validate.transform(list),
- [(str, validate.url())],
- ),
- )
-
- # then fall back to self.URL_M3U8
- if not urls:
+ # self.URL_M3U8 appears to be valid for all streams, but doesn't provide any content in some cases
+ try:
+ urls = self.session.http.get(
+ self.URL_VIDEOS.format(id=self.id),
+ schema=validate.Schema(
+ validate.transform(ZTNR.translate),
+ validate.transform(list),
+ [(str, validate.url())],
+ validate.length(1),
+ ),
+ )
+ except PluginError:
+ # catch HTTP errors and validation errors, and fall back to generic HLS URL template
url = self.URL_M3U8.format(id=self.id)
else:
url = next((url for _, url in urls if urlparse(url).path.endswith(".m3u8")), None)
| {"golden_diff": "diff --git a/src/streamlink/plugins/rtve.py b/src/streamlink/plugins/rtve.py\n--- a/src/streamlink/plugins/rtve.py\n+++ b/src/streamlink/plugins/rtve.py\n@@ -12,7 +12,7 @@\n from typing import Iterator, Sequence, Tuple\n from urllib.parse import urlparse\n \n-from streamlink.plugin import Plugin, pluginargument, pluginmatcher\n+from streamlink.plugin import Plugin, PluginError, pluginargument, pluginmatcher\n from streamlink.plugin.api import validate\n from streamlink.stream.ffmpegmux import MuxedStream\n from streamlink.stream.hls import HLSStream\n@@ -29,7 +29,7 @@\n def _iterate():\n while True:\n chunk = stream.read(1)\n- if len(chunk) == 0: # pragma: no cover\n+ if len(chunk) == 0:\n return\n yield ord(chunk)\n \n@@ -39,7 +39,7 @@\n res = []\n for _ in range(num):\n item = next(self._iterator, None)\n- if item is None: # pragma: no cover\n+ if item is None:\n break\n res.append(item)\n return res\n@@ -63,6 +63,14 @@\n self.skip(4)\n return chunktype, chunkdata\n \n+ def __iter__(self):\n+ self.skip(8)\n+ while True:\n+ try:\n+ yield self.read_chunk()\n+ except ValueError:\n+ return\n+\n \n class ZTNR:\n @staticmethod\n@@ -106,17 +114,16 @@\n @classmethod\n def translate(cls, data: str) -> Iterator[Tuple[str, str]]:\n reader = Base64Reader(data.replace(\"\\n\", \"\"))\n- reader.skip(8)\n- chunk_type, chunk_data = reader.read_chunk()\n- while chunk_type != \"IEND\":\n+ for chunk_type, chunk_data in reader:\n+ if chunk_type == \"IEND\":\n+ break\n if chunk_type == \"tEXt\":\n content = \"\".join(chr(item) for item in chunk_data if item > 0)\n- if \"#\" not in content or \"%%\" not in content: # pragma: no cover\n+ if \"#\" not in content or \"%%\" not in content:\n continue\n alphabet, content = content.split(\"#\", 1)\n quality, content = content.split(\"%%\", 1)\n yield quality, cls._get_source(alphabet, content)\n- chunk_type, chunk_data = reader.read_chunk()\n \n \n @pluginmatcher(re.compile(\n@@ -147,18 +154,19 @@\n return\n \n # check obfuscated stream URLs via self.URL_VIDEOS and ZTNR.translate() first\n- # self.URL_M3U8 appears to be valid for all streams, but doesn't provide any content in same cases\n- urls = self.session.http.get(\n- self.URL_VIDEOS.format(id=self.id),\n- schema=validate.Schema(\n- validate.transform(ZTNR.translate),\n- validate.transform(list),\n- [(str, validate.url())],\n- ),\n- )\n-\n- # then fall back to self.URL_M3U8\n- if not urls:\n+ # self.URL_M3U8 appears to be valid for all streams, but doesn't provide any content in some cases\n+ try:\n+ urls = self.session.http.get(\n+ self.URL_VIDEOS.format(id=self.id),\n+ schema=validate.Schema(\n+ validate.transform(ZTNR.translate),\n+ validate.transform(list),\n+ [(str, validate.url())],\n+ validate.length(1),\n+ ),\n+ )\n+ except PluginError:\n+ # catch HTTP errors and validation errors, and fall back to generic HLS URL template\n url = self.URL_M3U8.format(id=self.id)\n else:\n url = next((url for _, url in urls if urlparse(url).path.endswith(\".m3u8\")), None)\n", "issue": "plugins.rtve: ZTNR.translate() runs endlessly\n### Checklist\n\n- [X] This is a plugin issue and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master 
branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest stable release\n\n### Description\n\nRTVE plugin is not resolving any streams and stuck after message \"Found matching plugin rtve for URL\". Upon debugging in Python the following while block seems to cause an endless loop: https://github.com/streamlink/streamlink/blob/master/src/streamlink/plugins/rtve.py#L111\r\n\r\nThanks for the good work!\r\n \r\nCheers.\n\n### Debug log\n\n```text\nbin\\streamlink.exe -l debug https://rtve.es/play/videos/directo/canales-lineales/24h\r\n[cli][debug] OS: Windows 10\r\n[cli][debug] Python: 3.10.7\r\n[cli][debug] Streamlink: 5.0.0\r\n[cli][debug] Dependencies:\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.9.1\r\n[cli][debug] pycountry: 22.3.5\r\n[cli][debug] pycryptodome: 3.15.0\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.28.1\r\n[cli][debug] websocket-client: 1.4.1\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://rtve.es/play/videos/directo/canales-lineales/24h\r\n[cli][debug] --loglevel=debug\r\n[cli][info] Found matching plugin rtve for URL https://rtve.es/play/videos/directo/canales-lineales/24h\n```\n\n", "before_files": [{"content": "\"\"\"\n$description Live TV channels and video on-demand service from RTVE, a Spanish public, state-owned broadcaster.\n$url rtve.es\n$type live, vod\n$region Spain\n\"\"\"\n\nimport logging\nimport re\nfrom base64 import b64decode\nfrom io import BytesIO\nfrom typing import Iterator, Sequence, Tuple\nfrom urllib.parse import urlparse\n\nfrom streamlink.plugin import Plugin, pluginargument, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.ffmpegmux import MuxedStream\nfrom streamlink.stream.hls import HLSStream\nfrom streamlink.stream.http import HTTPStream\nfrom streamlink.utils.url import update_scheme\n\nlog = logging.getLogger(__name__)\n\n\nclass Base64Reader:\n def __init__(self, data: str):\n stream = BytesIO(b64decode(data))\n\n def _iterate():\n while True:\n chunk = stream.read(1)\n if len(chunk) == 0: # pragma: no cover\n return\n yield ord(chunk)\n\n self._iterator: Iterator[int] = _iterate()\n\n def read(self, num: int) -> Sequence[int]:\n res = []\n for _ in range(num):\n item = next(self._iterator, None)\n if item is None: # pragma: no cover\n break\n res.append(item)\n return res\n\n def skip(self, num: int) -> None:\n self.read(num)\n\n def read_chars(self, num: int) -> str:\n return \"\".join(chr(item) for item in self.read(num))\n\n def read_int(self) -> int:\n a, b, c, d = self.read(4)\n return a << 24 | b << 16 | c << 8 | d\n\n def read_chunk(self) -> Tuple[str, Sequence[int]]:\n size = self.read_int()\n chunktype = self.read_chars(4)\n chunkdata = self.read(size)\n if len(chunkdata) != size: # pragma: no cover\n raise ValueError(\"Invalid chunk length\")\n self.skip(4)\n return chunktype, chunkdata\n\n\nclass ZTNR:\n @staticmethod\n def _get_alphabet(text: str) -> str:\n res = []\n j = 0\n k = 0\n for char in text:\n if k > 0:\n k -= 1\n else:\n res.append(char)\n j = (j + 1) % 4\n k = j\n return \"\".join(res)\n\n @staticmethod\n def _get_url(text: str, alphabet: str) -> str:\n res = []\n j = 0\n n = 0\n k = 3\n cont = 0\n for char in text:\n if j == 0:\n n = int(char) * 10\n j = 1\n elif k > 0:\n k -= 1\n else:\n res.append(alphabet[n + int(char)])\n j = 0\n k = cont % 4\n cont += 1\n return \"\".join(res)\n\n @classmethod\n def _get_source(cls, alphabet: str, data: str) -> str:\n return cls._get_url(data, 
cls._get_alphabet(alphabet))\n\n @classmethod\n def translate(cls, data: str) -> Iterator[Tuple[str, str]]:\n reader = Base64Reader(data.replace(\"\\n\", \"\"))\n reader.skip(8)\n chunk_type, chunk_data = reader.read_chunk()\n while chunk_type != \"IEND\":\n if chunk_type == \"tEXt\":\n content = \"\".join(chr(item) for item in chunk_data if item > 0)\n if \"#\" not in content or \"%%\" not in content: # pragma: no cover\n continue\n alphabet, content = content.split(\"#\", 1)\n quality, content = content.split(\"%%\", 1)\n yield quality, cls._get_source(alphabet, content)\n chunk_type, chunk_data = reader.read_chunk()\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?rtve\\.es/play/videos/.+\"\n))\n@pluginargument(\n \"mux-subtitles\",\n is_global=True,\n)\nclass Rtve(Plugin):\n URL_M3U8 = \"https://ztnr.rtve.es/ztnr/{id}.m3u8\"\n URL_VIDEOS = \"https://ztnr.rtve.es/ztnr/movil/thumbnail/rtveplayw/videos/{id}.png?q=v2\"\n URL_SUBTITLES = \"https://www.rtve.es/api/videos/{id}/subtitulos.json\"\n\n def _get_streams(self):\n self.id = self.session.http.get(self.url, schema=validate.Schema(\n re.compile(r\"\\bdata-setup='({.+?})'\", re.DOTALL),\n validate.none_or_all(\n validate.get(1),\n validate.parse_json(),\n {\n \"idAsset\": validate.any(int, validate.all(str, validate.transform(int))),\n },\n validate.get(\"idAsset\"),\n ),\n ))\n if not self.id:\n return\n\n # check obfuscated stream URLs via self.URL_VIDEOS and ZTNR.translate() first\n # self.URL_M3U8 appears to be valid for all streams, but doesn't provide any content in same cases\n urls = self.session.http.get(\n self.URL_VIDEOS.format(id=self.id),\n schema=validate.Schema(\n validate.transform(ZTNR.translate),\n validate.transform(list),\n [(str, validate.url())],\n ),\n )\n\n # then fall back to self.URL_M3U8\n if not urls:\n url = self.URL_M3U8.format(id=self.id)\n else:\n url = next((url for _, url in urls if urlparse(url).path.endswith(\".m3u8\")), None)\n if not url:\n url = next((url for _, url in urls if urlparse(url).path.endswith(\".mp4\")), None)\n if url:\n yield \"vod\", HTTPStream(self.session, url)\n return\n\n streams = HLSStream.parse_variant_playlist(self.session, url).items()\n\n if self.options.get(\"mux-subtitles\"):\n subs = self.session.http.get(\n self.URL_SUBTITLES.format(id=self.id),\n schema=validate.Schema(\n validate.parse_json(),\n {\n \"page\": {\n \"items\": [{\n \"lang\": str,\n \"src\": validate.url(),\n }],\n },\n },\n validate.get((\"page\", \"items\")),\n ),\n )\n if subs:\n subtitles = {\n s[\"lang\"]: HTTPStream(self.session, update_scheme(\"https://\", s[\"src\"], force=True))\n for s in subs\n }\n for quality, stream in streams:\n yield quality, MuxedStream(self.session, stream, subtitles=subtitles)\n return\n\n yield from streams\n\n\n__plugin__ = Rtve\n", "path": "src/streamlink/plugins/rtve.py"}]} | 3,061 | 877 |
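The rtve patch above replaces the unbounded `while chunk_type != "IEND"` loop with iteration that stops once the byte stream is exhausted. A hedged, generic sketch of that chunk-reader pattern is below; the PNG-like layout (4-byte size, 4-byte type, payload, 4-byte CRC) matches the reader in the record, but the sample bytes are illustrative, not an RTVE payload:

```
from io import BytesIO
from typing import Iterator, Tuple

def iter_chunks(stream: BytesIO) -> Iterator[Tuple[str, bytes]]:
    stream.read(8)  # skip the signature, as the plugin's reader does
    while True:
        header = stream.read(8)
        if len(header) < 8:
            # Truncated or exhausted input ends iteration instead of
            # spinning forever on a missing IEND chunk.
            return
        size = int.from_bytes(header[:4], "big")
        ctype = header[4:8].decode("ascii")
        data = stream.read(size)
        stream.read(4)  # CRC, ignored here
        yield ctype, data
        if ctype == "IEND":
            return

# Tiny fabricated stream: signature + one zero-length IEND chunk.
fake = b"\x00" * 8 + (0).to_bytes(4, "big") + b"IEND" + b"\x00" * 4
for ctype, data in iter_chunks(BytesIO(fake)):
    print(ctype, len(data))
```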
gh_patches_debug_41609 | rasdani/github-patches | git_diff | getnikola__nikola-1292 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
render fails if the theme has a code.css
The `conf.py` says:
```
# Color scheme to be used for code blocks. If your theme provides
# "assets/css/code.css" this is ignored.
```
I've [provided one](https://github.com/mgaitan/my-nikola-theme/commit/f0140d0d67771d7ee9f46df2c78548c0e757f65e) but then I can't render my site
```
(blog)tin@morochita:~/lab/blog$ nikola build
Scanning posts.....done!
ERROR: Two different tasks can't have a common target.'output/assets/css/code.css' is a target for copy_files:output/assets/css/code.css and copy_assets:output/assets/css/code.css.
(blog)tin@morochita:~/lab/blog$
```
</issue>
<code>
[start of nikola/plugins/task/copy_assets.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2014 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 import codecs
28 import os
29
30 from nikola.plugin_categories import Task
31 from nikola import utils
32
33
34 class CopyAssets(Task):
35 """Copy theme assets into output."""
36
37 name = "copy_assets"
38
39 def gen_tasks(self):
40 """Create tasks to copy the assets of the whole theme chain.
41
42 If a file is present on two themes, use the version
43 from the "youngest" theme.
44 """
45
46 kw = {
47 "themes": self.site.THEMES,
48 "output_folder": self.site.config['OUTPUT_FOLDER'],
49 "filters": self.site.config['FILTERS'],
50 "code_color_scheme": self.site.config['CODE_COLOR_SCHEME'],
51 "code.css_selectors": 'pre.code',
52 "code.css_close": "\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\n",
53 }
54 has_code_css = False
55 tasks = {}
56 code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')
57
58 yield self.group_task()
59
60 for theme_name in kw['themes']:
61 src = os.path.join(utils.get_theme_path(theme_name), 'assets')
62 dst = os.path.join(kw['output_folder'], 'assets')
63 for task in utils.copy_tree(src, dst):
64 if task['name'] in tasks:
65 continue
66 has_code_css = task['targets'][0] == code_css_path
67 tasks[task['name']] = task
68 task['uptodate'] = [utils.config_changed(kw)]
69 task['basename'] = self.name
70 yield utils.apply_filters(task, kw['filters'])
71
72 if not has_code_css: # Generate it
73
74 def create_code_css():
75 from pygments.formatters import get_formatter_by_name
76 formatter = get_formatter_by_name('html', style=kw["code_color_scheme"])
77 utils.makedirs(os.path.dirname(code_css_path))
78 with codecs.open(code_css_path, 'wb+', 'utf8') as outf:
79 outf.write(formatter.get_style_defs(kw["code.css_selectors"]))
80 outf.write(kw["code.css_close"])
81
82 task = {
83 'basename': self.name,
84 'name': code_css_path,
85 'targets': [code_css_path],
86 'uptodate': [utils.config_changed(kw)],
87 'actions': [(create_code_css, [])],
88 'clean': True,
89 }
90 yield utils.apply_filters(task, kw['filters'])
91
[end of nikola/plugins/task/copy_assets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nikola/plugins/task/copy_assets.py b/nikola/plugins/task/copy_assets.py
--- a/nikola/plugins/task/copy_assets.py
+++ b/nikola/plugins/task/copy_assets.py
@@ -45,15 +45,21 @@
kw = {
"themes": self.site.THEMES,
+ "files_folders": self.site.config['FILES_FOLDERS'],
"output_folder": self.site.config['OUTPUT_FOLDER'],
"filters": self.site.config['FILTERS'],
"code_color_scheme": self.site.config['CODE_COLOR_SCHEME'],
"code.css_selectors": 'pre.code',
+ "code.css_head": '/* code.css file generated by Nikola */\n',
"code.css_close": "\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\n",
}
- has_code_css = False
tasks = {}
code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')
+ code_css_input = utils.get_asset_path('assets/css/code.css',
+ themes=kw['themes'],
+ files_folders=kw['files_folders'])
+
+ kw["code.css_input"] = code_css_input
yield self.group_task()
@@ -63,27 +69,35 @@
for task in utils.copy_tree(src, dst):
if task['name'] in tasks:
continue
- has_code_css = task['targets'][0] == code_css_path
tasks[task['name']] = task
task['uptodate'] = [utils.config_changed(kw)]
task['basename'] = self.name
+ if code_css_input:
+ task['file_dep'] = [code_css_input]
yield utils.apply_filters(task, kw['filters'])
- if not has_code_css: # Generate it
-
+ # Check whether or not there is a code.css file around.
+ if not code_css_input:
def create_code_css():
from pygments.formatters import get_formatter_by_name
formatter = get_formatter_by_name('html', style=kw["code_color_scheme"])
utils.makedirs(os.path.dirname(code_css_path))
with codecs.open(code_css_path, 'wb+', 'utf8') as outf:
+ outf.write(kw["code.css_head"])
outf.write(formatter.get_style_defs(kw["code.css_selectors"]))
outf.write(kw["code.css_close"])
+ if os.path.exists(code_css_path):
+ with codecs.open(code_css_path, 'r', 'utf-8') as fh:
+ testcontents = fh.read(len(kw["code.css_head"])) == kw["code.css_head"]
+ else:
+ testcontents = False
+
task = {
'basename': self.name,
'name': code_css_path,
'targets': [code_css_path],
- 'uptodate': [utils.config_changed(kw)],
+ 'uptodate': [utils.config_changed(kw), testcontents],
'actions': [(create_code_css, [])],
'clean': True,
}
| {"golden_diff": "diff --git a/nikola/plugins/task/copy_assets.py b/nikola/plugins/task/copy_assets.py\n--- a/nikola/plugins/task/copy_assets.py\n+++ b/nikola/plugins/task/copy_assets.py\n@@ -45,15 +45,21 @@\n \n kw = {\n \"themes\": self.site.THEMES,\n+ \"files_folders\": self.site.config['FILES_FOLDERS'],\n \"output_folder\": self.site.config['OUTPUT_FOLDER'],\n \"filters\": self.site.config['FILTERS'],\n \"code_color_scheme\": self.site.config['CODE_COLOR_SCHEME'],\n \"code.css_selectors\": 'pre.code',\n+ \"code.css_head\": '/* code.css file generated by Nikola */\\n',\n \"code.css_close\": \"\\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\\n\",\n }\n- has_code_css = False\n tasks = {}\n code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')\n+ code_css_input = utils.get_asset_path('assets/css/code.css',\n+ themes=kw['themes'],\n+ files_folders=kw['files_folders'])\n+\n+ kw[\"code.css_input\"] = code_css_input\n \n yield self.group_task()\n \n@@ -63,27 +69,35 @@\n for task in utils.copy_tree(src, dst):\n if task['name'] in tasks:\n continue\n- has_code_css = task['targets'][0] == code_css_path\n tasks[task['name']] = task\n task['uptodate'] = [utils.config_changed(kw)]\n task['basename'] = self.name\n+ if code_css_input:\n+ task['file_dep'] = [code_css_input]\n yield utils.apply_filters(task, kw['filters'])\n \n- if not has_code_css: # Generate it\n-\n+ # Check whether or not there is a code.css file around.\n+ if not code_css_input:\n def create_code_css():\n from pygments.formatters import get_formatter_by_name\n formatter = get_formatter_by_name('html', style=kw[\"code_color_scheme\"])\n utils.makedirs(os.path.dirname(code_css_path))\n with codecs.open(code_css_path, 'wb+', 'utf8') as outf:\n+ outf.write(kw[\"code.css_head\"])\n outf.write(formatter.get_style_defs(kw[\"code.css_selectors\"]))\n outf.write(kw[\"code.css_close\"])\n \n+ if os.path.exists(code_css_path):\n+ with codecs.open(code_css_path, 'r', 'utf-8') as fh:\n+ testcontents = fh.read(len(kw[\"code.css_head\"])) == kw[\"code.css_head\"]\n+ else:\n+ testcontents = False\n+\n task = {\n 'basename': self.name,\n 'name': code_css_path,\n 'targets': [code_css_path],\n- 'uptodate': [utils.config_changed(kw)],\n+ 'uptodate': [utils.config_changed(kw), testcontents],\n 'actions': [(create_code_css, [])],\n 'clean': True,\n }\n", "issue": "render fails if the theme has a code.css \nThe `conf.py` says: \n\n```\n# Color scheme to be used for code blocks. 
If your theme provides\n# \"assets/css/code.css\" this is ignored.\n```\n\nI've [provided one](https://github.com/mgaitan/my-nikola-theme/commit/f0140d0d67771d7ee9f46df2c78548c0e757f65e) but then I can't render my site\n\n```\n(blog)tin@morochita:~/lab/blog$ nikola build\nScanning posts.....done!\nERROR: Two different tasks can't have a common target.'output/assets/css/code.css' is a target for copy_files:output/assets/css/code.css and copy_assets:output/assets/css/code.css.\n(blog)tin@morochita:~/lab/blog$ \n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport codecs\nimport os\n\nfrom nikola.plugin_categories import Task\nfrom nikola import utils\n\n\nclass CopyAssets(Task):\n \"\"\"Copy theme assets into output.\"\"\"\n\n name = \"copy_assets\"\n\n def gen_tasks(self):\n \"\"\"Create tasks to copy the assets of the whole theme chain.\n\n If a file is present on two themes, use the version\n from the \"youngest\" theme.\n \"\"\"\n\n kw = {\n \"themes\": self.site.THEMES,\n \"output_folder\": self.site.config['OUTPUT_FOLDER'],\n \"filters\": self.site.config['FILTERS'],\n \"code_color_scheme\": self.site.config['CODE_COLOR_SCHEME'],\n \"code.css_selectors\": 'pre.code',\n \"code.css_close\": \"\\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\\n\",\n }\n has_code_css = False\n tasks = {}\n code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')\n\n yield self.group_task()\n\n for theme_name in kw['themes']:\n src = os.path.join(utils.get_theme_path(theme_name), 'assets')\n dst = os.path.join(kw['output_folder'], 'assets')\n for task in utils.copy_tree(src, dst):\n if task['name'] in tasks:\n continue\n has_code_css = task['targets'][0] == code_css_path\n tasks[task['name']] = task\n task['uptodate'] = [utils.config_changed(kw)]\n task['basename'] = self.name\n yield utils.apply_filters(task, kw['filters'])\n\n if not has_code_css: # Generate it\n\n def create_code_css():\n from pygments.formatters import get_formatter_by_name\n formatter = get_formatter_by_name('html', style=kw[\"code_color_scheme\"])\n utils.makedirs(os.path.dirname(code_css_path))\n with codecs.open(code_css_path, 'wb+', 'utf8') as outf:\n outf.write(formatter.get_style_defs(kw[\"code.css_selectors\"]))\n outf.write(kw[\"code.css_close\"])\n\n task = {\n 'basename': self.name,\n 'name': code_css_path,\n 'targets': 
[code_css_path],\n 'uptodate': [utils.config_changed(kw)],\n 'actions': [(create_code_css, [])],\n 'clean': True,\n }\n yield utils.apply_filters(task, kw['filters'])\n", "path": "nikola/plugins/task/copy_assets.py"}]} | 1,704 | 689 |
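The nikola patch above avoids the "two tasks share one target" error by only registering the code.css generator when no theme or files folder already provides the asset (via `utils.get_asset_path`). A simplified sketch of that scheduling idea follows, with made-up folder names and plain strings standing in for doit tasks:

```
from __future__ import annotations

import os

def find_provided_asset(relpath: str, search_dirs: list[str]) -> str | None:
    # Return the first theme/files folder that already ships this asset.
    for base in search_dirs:
        candidate = os.path.join(base, relpath)
        if os.path.exists(candidate):
            return candidate
    return None

def plan_tasks(output_dir: str, search_dirs: list[str]) -> list[str]:
    tasks = ["copy theme assets"]
    code_css = os.path.join("assets", "css", "code.css")
    if find_provided_asset(code_css, search_dirs) is None:
        # Only schedule the generator when nothing else provides the file,
        # so two tasks never claim the same output target.
        tasks.append("generate " + os.path.join(output_dir, code_css))
    return tasks

print(plan_tasks("output", ["my-theme", "files"]))
```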
gh_patches_debug_2437 | rasdani/github-patches | git_diff | urllib3__urllib3-2843 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
flaky and pytest-memray incompatible
### Subject
```
______________________________________________________________________________________________________ TestHTTPProxyManager.test_forwarding_proxy_request_timeout[https-https-True] ______________________________________________________________________________________________________
Traceback (most recent call last):
File "/home/graingert/projects/urllib3/.nox/test-3-11/lib/python3.11/site-packages/pytest_memray/plugin.py", line 122, in wrapper
result: object | None = func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "/home/graingert/projects/urllib3/.nox/test-3-11/lib/python3.11/site-packages/pytest_memray/plugin.py", line 121, in wrapper
with Tracker(result_file):
File "src/memray/_memray.pyx", line 404, in memray._memray.Tracker.__enter__
RuntimeError: No more than one Tracker instance can be active at the same time
```
caused by a flaky test:
```
===Flaky Test Report===
test_forwarding_proxy_request_timeout[https-https-True] failed (1 runs remaining out of 2).
<class 'AssertionError'>
assert <class 'urllib3.exceptions.ProxyError'> == ReadTimeoutError
+ where <class 'urllib3.exceptions.ProxyError'> = type(ProxyError('Unable to connect to proxy', ReadTimeoutError("HTTPSConnectionPool(host='240.0.0.0', port=443): Read timed out. (read timeout=0.01)")))
+ where ProxyError('Unable to connect to proxy', ReadTimeoutError("HTTPSConnectionPool(host='240.0.0.0', port=443): Read timed out. (read timeout=0.01)")) = MaxRetryError('HTTPSConnectionPool(host=\'240.0.0.0\', port=443): Max retries exceeded with url: https://240.0.0.0 (Caused by ProxyError(\'Unable to connect to proxy\', ReadTimeoutError("HTTPSConnectionPool(host=\'240.0.0.0\', port=443): Read timed out. (read timeout=0.01)")))').reason
+ where MaxRetryError('HTTPSConnectionPool(host=\'240.0.0.0\', port=443): Max retries exceeded with url: https://240.0.0.0 (Caused by ProxyError(\'Unable to connect to proxy\', ReadTimeoutError("HTTPSConnectionPool(host=\'240.0.0.0\', port=443): Read timed out. (read timeout=0.01)")))') = <ExceptionInfo MaxRetryError('HTTPSConnectionPool(host=\'240.0.0.0\', port=443): Max retries exceeded with url: https://240.0.0.0 (Ca...proxy\', ReadTimeoutError("HTTPSConnectionPool(host=\'240.0.0.0\', port=443): Read timed out. (read timeout=0.01)")))') tblen=10>.value
[<TracebackEntry /home/graingert/projects/urllib3/test/with_dummyserver/test_proxy_poolmanager.py:484>]
test_forwarding_proxy_request_timeout[https-https-True] failed; it passed 0 out of the required 1 times.
<class 'RuntimeError'>
No more than one Tracker instance can be active at the same time
[<TracebackEntry /home/graingert/projects/urllib3/.nox/test-3-11/lib/python3.11/site-packages/pytest_memray/plugin.py:122>, <TracebackEntry /home/graingert/projects/urllib3/.nox/test-3-11/lib/python3.11/site-packages/pytest_memray/plugin.py:121>, <TracebackEntry src/memray/_memray.pyx:404>]
```
see also https://github.com/bloomberg/pytest-memray/issues/53
</issue>
<code>
[start of noxfile.py]
1 from __future__ import annotations
2
3 import os
4 import shutil
5 import subprocess
6 import sys
7
8 import nox
9
10 SOURCE_FILES = [
11 "docs/",
12 "dummyserver/",
13 "src/",
14 "test/",
15 "noxfile.py",
16 "setup.py",
17 ]
18
19
20 def tests_impl(
21 session: nox.Session,
22 extras: str = "socks,secure,brotli,zstd",
23 byte_string_comparisons: bool = True,
24 ) -> None:
25 # Install deps and the package itself.
26 session.install("-r", "dev-requirements.txt")
27 session.install(f".[{extras}]")
28
29 # Show the pip version.
30 session.run("pip", "--version")
31 # Print the Python version and bytesize.
32 session.run("python", "--version")
33 session.run("python", "-c", "import struct; print(struct.calcsize('P') * 8)")
34 # Print OpenSSL information.
35 session.run("python", "-m", "OpenSSL.debug")
36
37 memray_supported = True
38 if sys.implementation.name != "cpython" or sys.version_info < (3, 8):
39 memray_supported = False # pytest-memray requires CPython 3.8+
40 elif sys.platform == "win32":
41 memray_supported = False
42
43 # Inspired from https://hynek.me/articles/ditch-codecov-python/
44 # We use parallel mode and then combine in a later CI step
45 session.run(
46 "python",
47 *(("-bb",) if byte_string_comparisons else ()),
48 "-m",
49 "coverage",
50 "run",
51 "--parallel-mode",
52 "-m",
53 "pytest",
54 *("--memray", "--hide-memray-summary") if memray_supported else (),
55 "-v",
56 "-ra",
57 f"--color={'yes' if 'GITHUB_ACTIONS' in os.environ else 'auto'}",
58 "--tb=native",
59 "--no-success-flaky-report",
60 "--durations=10",
61 "--strict-config",
62 "--strict-markers",
63 *(session.posargs or ("test/",)),
64 env={"PYTHONWARNINGS": "always::DeprecationWarning"},
65 )
66
67
68 @nox.session(python=["3.7", "3.8", "3.9", "3.10", "3.11", "pypy"])
69 def test(session: nox.Session) -> None:
70 tests_impl(session)
71
72
73 @nox.session(python=["2.7"])
74 def unsupported_setup_py(session: nox.Session) -> None:
75 # Can't check both returncode and output with session.run
76 process = subprocess.run(
77 ["python", "setup.py", "install"],
78 env={**session.env},
79 text=True,
80 capture_output=True,
81 )
82 assert process.returncode == 1
83 print(process.stderr)
84 assert "Please use `python -m pip install .` instead." in process.stderr
85
86
87 @nox.session(python=["3"])
88 def test_brotlipy(session: nox.Session) -> None:
89 """Check that if 'brotlipy' is installed instead of 'brotli' or
90 'brotlicffi' that we still don't blow up.
91 """
92 session.install("brotlipy")
93 tests_impl(session, extras="socks,secure", byte_string_comparisons=False)
94
95
96 def git_clone(session: nox.Session, git_url: str) -> None:
97 """We either clone the target repository or if already exist
98 simply reset the state and pull.
99 """
100 expected_directory = git_url.split("/")[-1]
101
102 if expected_directory.endswith(".git"):
103 expected_directory = expected_directory[:-4]
104
105 if not os.path.isdir(expected_directory):
106 session.run("git", "clone", "--depth", "1", git_url, external=True)
107 else:
108 session.run(
109 "git", "-C", expected_directory, "reset", "--hard", "HEAD", external=True
110 )
111 session.run("git", "-C", expected_directory, "pull", external=True)
112
113
114 @nox.session()
115 def downstream_botocore(session: nox.Session) -> None:
116 root = os.getcwd()
117 tmp_dir = session.create_tmp()
118
119 session.cd(tmp_dir)
120 git_clone(session, "https://github.com/boto/botocore")
121 session.chdir("botocore")
122 for patch in [
123 "0001-Mark-100-Continue-tests-as-failing.patch",
124 "0002-Stop-relying-on-removed-DEFAULT_CIPHERS.patch",
125 ]:
126 session.run("git", "apply", f"{root}/ci/{patch}", external=True)
127 session.run("git", "rev-parse", "HEAD", external=True)
128 session.run("python", "scripts/ci/install")
129
130 session.cd(root)
131 session.install(".", silent=False)
132 session.cd(f"{tmp_dir}/botocore")
133
134 session.run("python", "-c", "import urllib3; print(urllib3.__version__)")
135 session.run("python", "scripts/ci/run-tests")
136
137
138 @nox.session()
139 def downstream_requests(session: nox.Session) -> None:
140 root = os.getcwd()
141 tmp_dir = session.create_tmp()
142
143 session.cd(tmp_dir)
144 git_clone(session, "https://github.com/psf/requests")
145 session.chdir("requests")
146 session.run(
147 "git", "apply", f"{root}/ci/0003-requests-removed-warnings.patch", external=True
148 )
149 session.run(
150 "git", "apply", f"{root}/ci/0004-requests-chunked-requests.patch", external=True
151 )
152 session.run("git", "rev-parse", "HEAD", external=True)
153 session.install(".[socks]", silent=False)
154 session.install("-r", "requirements-dev.txt", silent=False)
155
156 session.cd(root)
157 session.install(".", silent=False)
158 session.cd(f"{tmp_dir}/requests")
159
160 session.run("python", "-c", "import urllib3; print(urllib3.__version__)")
161 session.run("pytest", "tests")
162
163
164 @nox.session()
165 def format(session: nox.Session) -> None:
166 """Run code formatters."""
167 lint(session)
168
169
170 @nox.session
171 def lint(session: nox.Session) -> None:
172 session.install("pre-commit")
173 session.run("pre-commit", "run", "--all-files")
174
175 mypy(session)
176
177
178 @nox.session(python="3.8")
179 def mypy(session: nox.Session) -> None:
180 """Run mypy."""
181 session.install("-r", "mypy-requirements.txt")
182 session.run("mypy", "--version")
183 session.run(
184 "mypy",
185 "dummyserver",
186 "noxfile.py",
187 "src/urllib3",
188 "test",
189 )
190
191
192 @nox.session
193 def docs(session: nox.Session) -> None:
194 session.install("-r", "docs/requirements.txt")
195 session.install(".[socks,secure,brotli,zstd]")
196
197 session.chdir("docs")
198 if os.path.exists("_build"):
199 shutil.rmtree("_build")
200 session.run("sphinx-build", "-b", "html", "-W", ".", "_build/html")
201
[end of noxfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -56,7 +56,6 @@
"-ra",
f"--color={'yes' if 'GITHUB_ACTIONS' in os.environ else 'auto'}",
"--tb=native",
- "--no-success-flaky-report",
"--durations=10",
"--strict-config",
"--strict-markers",
| {"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -56,7 +56,6 @@\n \"-ra\",\n f\"--color={'yes' if 'GITHUB_ACTIONS' in os.environ else 'auto'}\",\n \"--tb=native\",\n- \"--no-success-flaky-report\",\n \"--durations=10\",\n \"--strict-config\",\n \"--strict-markers\",\n", "issue": "flaky and pytest-memray incompatible\n### Subject\r\n\r\n```\r\n______________________________________________________________________________________________________ TestHTTPProxyManager.test_forwarding_proxy_request_timeout[https-https-True] ______________________________________________________________________________________________________\r\nTraceback (most recent call last):\r\n File \"/home/graingert/projects/urllib3/.nox/test-3-11/lib/python3.11/site-packages/pytest_memray/plugin.py\", line 122, in wrapper\r\n result: object | None = func(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/graingert/projects/urllib3/.nox/test-3-11/lib/python3.11/site-packages/pytest_memray/plugin.py\", line 121, in wrapper\r\n with Tracker(result_file):\r\n File \"src/memray/_memray.pyx\", line 404, in memray._memray.Tracker.__enter__\r\nRuntimeError: No more than one Tracker instance can be active at the same time\r\n```\r\ncaused by a flaky test:\r\n```\r\n===Flaky Test Report===\r\n\r\ntest_forwarding_proxy_request_timeout[https-https-True] failed (1 runs remaining out of 2).\r\n <class 'AssertionError'>\r\n assert <class 'urllib3.exceptions.ProxyError'> == ReadTimeoutError\r\n + where <class 'urllib3.exceptions.ProxyError'> = type(ProxyError('Unable to connect to proxy', ReadTimeoutError(\"HTTPSConnectionPool(host='240.0.0.0', port=443): Read timed out. (read timeout=0.01)\")))\r\n + where ProxyError('Unable to connect to proxy', ReadTimeoutError(\"HTTPSConnectionPool(host='240.0.0.0', port=443): Read timed out. (read timeout=0.01)\")) = MaxRetryError('HTTPSConnectionPool(host=\\'240.0.0.0\\', port=443): Max retries exceeded with url: https://240.0.0.0 (Caused by ProxyError(\\'Unable to connect to proxy\\', ReadTimeoutError(\"HTTPSConnectionPool(host=\\'240.0.0.0\\', port=443): Read timed out. (read timeout=0.01)\")))').reason\r\n + where MaxRetryError('HTTPSConnectionPool(host=\\'240.0.0.0\\', port=443): Max retries exceeded with url: https://240.0.0.0 (Caused by ProxyError(\\'Unable to connect to proxy\\', ReadTimeoutError(\"HTTPSConnectionPool(host=\\'240.0.0.0\\', port=443): Read timed out. (read timeout=0.01)\")))') = <ExceptionInfo MaxRetryError('HTTPSConnectionPool(host=\\'240.0.0.0\\', port=443): Max retries exceeded with url: https://240.0.0.0 (Ca...proxy\\', ReadTimeoutError(\"HTTPSConnectionPool(host=\\'240.0.0.0\\', port=443): Read timed out. 
(read timeout=0.01)\")))') tblen=10>.value\r\n [<TracebackEntry /home/graingert/projects/urllib3/test/with_dummyserver/test_proxy_poolmanager.py:484>]\r\ntest_forwarding_proxy_request_timeout[https-https-True] failed; it passed 0 out of the required 1 times.\r\n <class 'RuntimeError'>\r\n No more than one Tracker instance can be active at the same time\r\n [<TracebackEntry /home/graingert/projects/urllib3/.nox/test-3-11/lib/python3.11/site-packages/pytest_memray/plugin.py:122>, <TracebackEntry /home/graingert/projects/urllib3/.nox/test-3-11/lib/python3.11/site-packages/pytest_memray/plugin.py:121>, <TracebackEntry src/memray/_memray.pyx:404>]\r\n\r\n```\r\n\r\nsee also https://github.com/bloomberg/pytest-memray/issues/53\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nimport nox\n\nSOURCE_FILES = [\n \"docs/\",\n \"dummyserver/\",\n \"src/\",\n \"test/\",\n \"noxfile.py\",\n \"setup.py\",\n]\n\n\ndef tests_impl(\n session: nox.Session,\n extras: str = \"socks,secure,brotli,zstd\",\n byte_string_comparisons: bool = True,\n) -> None:\n # Install deps and the package itself.\n session.install(\"-r\", \"dev-requirements.txt\")\n session.install(f\".[{extras}]\")\n\n # Show the pip version.\n session.run(\"pip\", \"--version\")\n # Print the Python version and bytesize.\n session.run(\"python\", \"--version\")\n session.run(\"python\", \"-c\", \"import struct; print(struct.calcsize('P') * 8)\")\n # Print OpenSSL information.\n session.run(\"python\", \"-m\", \"OpenSSL.debug\")\n\n memray_supported = True\n if sys.implementation.name != \"cpython\" or sys.version_info < (3, 8):\n memray_supported = False # pytest-memray requires CPython 3.8+\n elif sys.platform == \"win32\":\n memray_supported = False\n\n # Inspired from https://hynek.me/articles/ditch-codecov-python/\n # We use parallel mode and then combine in a later CI step\n session.run(\n \"python\",\n *((\"-bb\",) if byte_string_comparisons else ()),\n \"-m\",\n \"coverage\",\n \"run\",\n \"--parallel-mode\",\n \"-m\",\n \"pytest\",\n *(\"--memray\", \"--hide-memray-summary\") if memray_supported else (),\n \"-v\",\n \"-ra\",\n f\"--color={'yes' if 'GITHUB_ACTIONS' in os.environ else 'auto'}\",\n \"--tb=native\",\n \"--no-success-flaky-report\",\n \"--durations=10\",\n \"--strict-config\",\n \"--strict-markers\",\n *(session.posargs or (\"test/\",)),\n env={\"PYTHONWARNINGS\": \"always::DeprecationWarning\"},\n )\n\n\[email protected](python=[\"3.7\", \"3.8\", \"3.9\", \"3.10\", \"3.11\", \"pypy\"])\ndef test(session: nox.Session) -> None:\n tests_impl(session)\n\n\[email protected](python=[\"2.7\"])\ndef unsupported_setup_py(session: nox.Session) -> None:\n # Can't check both returncode and output with session.run\n process = subprocess.run(\n [\"python\", \"setup.py\", \"install\"],\n env={**session.env},\n text=True,\n capture_output=True,\n )\n assert process.returncode == 1\n print(process.stderr)\n assert \"Please use `python -m pip install .` instead.\" in process.stderr\n\n\[email protected](python=[\"3\"])\ndef test_brotlipy(session: nox.Session) -> None:\n \"\"\"Check that if 'brotlipy' is installed instead of 'brotli' or\n 'brotlicffi' that we still don't blow up.\n \"\"\"\n session.install(\"brotlipy\")\n tests_impl(session, extras=\"socks,secure\", byte_string_comparisons=False)\n\n\ndef git_clone(session: nox.Session, git_url: str) -> None:\n \"\"\"We either clone the target repository or if already exist\n simply reset the state and 
pull.\n \"\"\"\n expected_directory = git_url.split(\"/\")[-1]\n\n if expected_directory.endswith(\".git\"):\n expected_directory = expected_directory[:-4]\n\n if not os.path.isdir(expected_directory):\n session.run(\"git\", \"clone\", \"--depth\", \"1\", git_url, external=True)\n else:\n session.run(\n \"git\", \"-C\", expected_directory, \"reset\", \"--hard\", \"HEAD\", external=True\n )\n session.run(\"git\", \"-C\", expected_directory, \"pull\", external=True)\n\n\[email protected]()\ndef downstream_botocore(session: nox.Session) -> None:\n root = os.getcwd()\n tmp_dir = session.create_tmp()\n\n session.cd(tmp_dir)\n git_clone(session, \"https://github.com/boto/botocore\")\n session.chdir(\"botocore\")\n for patch in [\n \"0001-Mark-100-Continue-tests-as-failing.patch\",\n \"0002-Stop-relying-on-removed-DEFAULT_CIPHERS.patch\",\n ]:\n session.run(\"git\", \"apply\", f\"{root}/ci/{patch}\", external=True)\n session.run(\"git\", \"rev-parse\", \"HEAD\", external=True)\n session.run(\"python\", \"scripts/ci/install\")\n\n session.cd(root)\n session.install(\".\", silent=False)\n session.cd(f\"{tmp_dir}/botocore\")\n\n session.run(\"python\", \"-c\", \"import urllib3; print(urllib3.__version__)\")\n session.run(\"python\", \"scripts/ci/run-tests\")\n\n\[email protected]()\ndef downstream_requests(session: nox.Session) -> None:\n root = os.getcwd()\n tmp_dir = session.create_tmp()\n\n session.cd(tmp_dir)\n git_clone(session, \"https://github.com/psf/requests\")\n session.chdir(\"requests\")\n session.run(\n \"git\", \"apply\", f\"{root}/ci/0003-requests-removed-warnings.patch\", external=True\n )\n session.run(\n \"git\", \"apply\", f\"{root}/ci/0004-requests-chunked-requests.patch\", external=True\n )\n session.run(\"git\", \"rev-parse\", \"HEAD\", external=True)\n session.install(\".[socks]\", silent=False)\n session.install(\"-r\", \"requirements-dev.txt\", silent=False)\n\n session.cd(root)\n session.install(\".\", silent=False)\n session.cd(f\"{tmp_dir}/requests\")\n\n session.run(\"python\", \"-c\", \"import urllib3; print(urllib3.__version__)\")\n session.run(\"pytest\", \"tests\")\n\n\[email protected]()\ndef format(session: nox.Session) -> None:\n \"\"\"Run code formatters.\"\"\"\n lint(session)\n\n\[email protected]\ndef lint(session: nox.Session) -> None:\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"run\", \"--all-files\")\n\n mypy(session)\n\n\[email protected](python=\"3.8\")\ndef mypy(session: nox.Session) -> None:\n \"\"\"Run mypy.\"\"\"\n session.install(\"-r\", \"mypy-requirements.txt\")\n session.run(\"mypy\", \"--version\")\n session.run(\n \"mypy\",\n \"dummyserver\",\n \"noxfile.py\",\n \"src/urllib3\",\n \"test\",\n )\n\n\[email protected]\ndef docs(session: nox.Session) -> None:\n session.install(\"-r\", \"docs/requirements.txt\")\n session.install(\".[socks,secure,brotli,zstd]\")\n\n session.chdir(\"docs\")\n if os.path.exists(\"_build\"):\n shutil.rmtree(\"_build\")\n session.run(\"sphinx-build\", \"-b\", \"html\", \"-W\", \".\", \"_build/html\")\n", "path": "noxfile.py"}]} | 3,490 | 99 |
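The urllib3 change above only drops the `--no-success-flaky-report` flag from the pytest invocation in `noxfile.py`. As a generic illustration of how that argument list is assembled after the fix, here is a hedged sketch that mirrors the record's noxfile logic in a plain helper function; the helper itself is invented for illustration and is not part of urllib3:

```
import os
import sys

def build_pytest_args(posargs: tuple[str, ...] = ("test/",)) -> list[str]:
    # Mirror the noxfile's platform check: pytest-memray needs CPython 3.8+
    # and is not used on Windows.
    memray_supported = (
        sys.implementation.name == "cpython"
        and sys.version_info >= (3, 8)
        and sys.platform != "win32"
    )
    args = ["-m", "pytest"]
    if memray_supported:
        args += ["--memray", "--hide-memray-summary"]
    args += [
        "-v",
        "-ra",
        f"--color={'yes' if 'GITHUB_ACTIONS' in os.environ else 'auto'}",
        "--tb=native",
        "--durations=10",
        "--strict-config",
        "--strict-markers",
        *posargs,
    ]
    return args

print(build_pytest_args())
```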
gh_patches_debug_34527 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1694 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: source 'City of Karlsruhe' stopped working
### I Have A Problem With:
A specific source
### What's Your Problem
Release 1.44.0:
Due to changes on the website the source '**City of Karlsruhe**' (name: karlsruhe_de) stopped working.
I start troubleshooting and add my findings here.
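For reference, a minimal manual probe of the collection endpoint (illustrative only — the URL pattern is taken from the `karlsruhe_de` source further below, and the street/house-number values are just examples):

```python
# Quick manual check of the endpoint used by the karlsruhe_de source.
# Values below are examples; adjust street/hausnr/year as needed.
import requests

url = "https://web6.karlsruhe.de/service/abfall/akal/akal_2023.php"
data = {"strasse_n": "Habichtweg", "hausnr": 4, "ical": "+iCalendar"}
r = requests.post(url, data=data, params={"hausnr": 4})
print(r.status_code, r.headers.get("Content-Type"))
print(r.text[:120])  # a working endpoint should return iCalendar text (BEGIN:VCALENDAR)
```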
### Source (if relevant)
karlsruhe_de
### Logs
_No response_
### Relevant Configuration
_No response_
### Checklist Source Error
- [x] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [x] Checked that the website of your service provider is still working
- [x] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [x] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
</issue>
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py]
1 from datetime import datetime
2
3 import requests
4 from waste_collection_schedule import Collection # type: ignore[attr-defined]
5 from waste_collection_schedule.service.ICS import ICS
6
7 TITLE = "City of Karlsruhe"
8 DESCRIPTION = "Source for City of Karlsruhe."
9 URL = "https://www.karlsruhe.de/"
10 TEST_CASES = {
11 "Östliche Rheinbrückenstraße 1": {
12 "street": "Östliche Rheinbrückenstraße",
13 "hnr": 1,
14 },
15 "Habichtweg 4": {"street": "Habichtweg", "hnr": 4},
16 "Machstraße 5": {"street": "Machstraße", "hnr": 5},
17 "Bernsteinstraße 10 ladeort 1": {
18 "street": "Bernsteinstraße",
19 "hnr": 10,
20 "ladeort": 1,
21 },
22 "Bernsteinstraße 10 ladeort 2": {
23 "street": "Bernsteinstraße",
24 "hnr": 10,
25 "ladeort": 2,
26 },
27 }
28
29
30 ICON_MAP = {
31 "Restmüll": "mdi:trash-can",
32 "Bioabfall": "mdi:leaf",
33 "Papier": "mdi:package-variant",
34 "Wertstoff": "mdi:recycle",
35 "Sperrmüllabholung": "mdi:wardrobe",
36 }
37
38
39 API_URL = "https://web6.karlsruhe.de/service/abfall/akal/akal_{year}.php"
40
41
42 class Source:
43 def __init__(self, street: str, hnr: str | int, ladeort: int | None = None):
44 self._street: str = street
45 self._hnr: str | int = hnr
46 self._ladeort: int | None = ladeort
47 self.ics = ICS()
48
49 def fetch(self):
50 now = datetime.now()
51 error = None
52 for year in (now.year, now.year + 1, now.year - 1):
53 try:
54 return self.get_data(API_URL.format(year=year))
55 except Exception as e:
56 error = e
57 raise error
58
59 def get_data(self, url):
60 data = {
61 "strasse_n": self._street,
62 "hausnr": self._hnr,
63 "ical": "+iCalendar",
64 "ladeort": self._ladeort,
65 }
66 params = {"hausnr": self._hnr}
67
68 r = requests.post(url, data=data, params=params)
69 dates = self.ics.convert(r.text)
70
71 entries = []
72 for d in dates:
73 date, waste_type = d
74 waste_type = waste_type.split(",")[0]
75 icon = ICON_MAP.get(waste_type)
76 entries.append(Collection(date=date, t=waste_type, icon=icon))
77
78 return entries
79
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py
@@ -1,9 +1,17 @@
from datetime import datetime
import requests
+import urllib3
from waste_collection_schedule import Collection # type: ignore[attr-defined]
from waste_collection_schedule.service.ICS import ICS
+# With verify=True the POST fails due to a SSLCertVerificationError.
+# Using verify=False works, but is not ideal. The following links may provide a better way of dealing with this:
+# https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html#ssl-warnings
+# https://urllib3.readthedocs.io/en/1.26.x/user-guide.html#ssl
+# These two lines areused to suppress the InsecureRequestWarning when using verify=False
+urllib3.disable_warnings()
+
TITLE = "City of Karlsruhe"
DESCRIPTION = "Source for City of Karlsruhe."
URL = "https://www.karlsruhe.de/"
@@ -36,7 +44,7 @@
}
-API_URL = "https://web6.karlsruhe.de/service/abfall/akal/akal_{year}.php"
+API_URL = "https://web{i}.karlsruhe.de/service/abfall/akal/akal_{year}.php"
class Source:
@@ -50,10 +58,11 @@
now = datetime.now()
error = None
for year in (now.year, now.year + 1, now.year - 1):
- try:
- return self.get_data(API_URL.format(year=year))
- except Exception as e:
- error = e
+ for i in (4, 6):
+ try:
+ return self.get_data(API_URL.format(year=year, i=i))
+ except Exception as e:
+ error = e
raise error
def get_data(self, url):
@@ -65,7 +74,7 @@
}
params = {"hausnr": self._hnr}
- r = requests.post(url, data=data, params=params)
+ r = requests.post(url, data=data, params=params, verify=False)
dates = self.ics.convert(r.text)
entries = []
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py\n@@ -1,9 +1,17 @@\n from datetime import datetime\n \n import requests\n+import urllib3\n from waste_collection_schedule import Collection # type: ignore[attr-defined]\n from waste_collection_schedule.service.ICS import ICS\n \n+# With verify=True the POST fails due to a SSLCertVerificationError.\n+# Using verify=False works, but is not ideal. The following links may provide a better way of dealing with this:\n+# https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html#ssl-warnings\n+# https://urllib3.readthedocs.io/en/1.26.x/user-guide.html#ssl\n+# These two lines areused to suppress the InsecureRequestWarning when using verify=False\n+urllib3.disable_warnings()\n+\n TITLE = \"City of Karlsruhe\"\n DESCRIPTION = \"Source for City of Karlsruhe.\"\n URL = \"https://www.karlsruhe.de/\"\n@@ -36,7 +44,7 @@\n }\n \n \n-API_URL = \"https://web6.karlsruhe.de/service/abfall/akal/akal_{year}.php\"\n+API_URL = \"https://web{i}.karlsruhe.de/service/abfall/akal/akal_{year}.php\"\n \n \n class Source:\n@@ -50,10 +58,11 @@\n now = datetime.now()\n error = None\n for year in (now.year, now.year + 1, now.year - 1):\n- try:\n- return self.get_data(API_URL.format(year=year))\n- except Exception as e:\n- error = e\n+ for i in (4, 6):\n+ try:\n+ return self.get_data(API_URL.format(year=year, i=i))\n+ except Exception as e:\n+ error = e\n raise error\n \n def get_data(self, url):\n@@ -65,7 +74,7 @@\n }\n params = {\"hausnr\": self._hnr}\n \n- r = requests.post(url, data=data, params=params)\n+ r = requests.post(url, data=data, params=params, verify=False)\n dates = self.ics.convert(r.text)\n \n entries = []\n", "issue": "[Bug]: source 'City of Karlsruhe' stopped working\n### I Have A Problem With:\n\nA specific source\n\n### What's Your Problem\n\nRelease 1.44.0:\r\nDue to changes on the website the source '**City of Karlsruhe**' (name: karlsruhe_de) stopped working.\r\nI start troubleshooting and add my findings here.\n\n### Source (if relevant)\n\nkarlsruhe_de\n\n### Logs\n\n_No response_\n\n### Relevant Configuration\n\n_No response_\n\n### Checklist Source Error\n\n- [x] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [x] Checked that the website of your service provider is still working\n- [x] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [x] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "from datetime import datetime\n\nimport requests\nfrom waste_collection_schedule 
import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"City of Karlsruhe\"\nDESCRIPTION = \"Source for City of Karlsruhe.\"\nURL = \"https://www.karlsruhe.de/\"\nTEST_CASES = {\n \"\u00d6stliche Rheinbr\u00fcckenstra\u00dfe 1\": {\n \"street\": \"\u00d6stliche Rheinbr\u00fcckenstra\u00dfe\",\n \"hnr\": 1,\n },\n \"Habichtweg 4\": {\"street\": \"Habichtweg\", \"hnr\": 4},\n \"Machstra\u00dfe 5\": {\"street\": \"Machstra\u00dfe\", \"hnr\": 5},\n \"Bernsteinstra\u00dfe 10 ladeort 1\": {\n \"street\": \"Bernsteinstra\u00dfe\",\n \"hnr\": 10,\n \"ladeort\": 1,\n },\n \"Bernsteinstra\u00dfe 10 ladeort 2\": {\n \"street\": \"Bernsteinstra\u00dfe\",\n \"hnr\": 10,\n \"ladeort\": 2,\n },\n}\n\n\nICON_MAP = {\n \"Restm\u00fcll\": \"mdi:trash-can\",\n \"Bioabfall\": \"mdi:leaf\",\n \"Papier\": \"mdi:package-variant\",\n \"Wertstoff\": \"mdi:recycle\",\n \"Sperrm\u00fcllabholung\": \"mdi:wardrobe\",\n}\n\n\nAPI_URL = \"https://web6.karlsruhe.de/service/abfall/akal/akal_{year}.php\"\n\n\nclass Source:\n def __init__(self, street: str, hnr: str | int, ladeort: int | None = None):\n self._street: str = street\n self._hnr: str | int = hnr\n self._ladeort: int | None = ladeort\n self.ics = ICS()\n\n def fetch(self):\n now = datetime.now()\n error = None\n for year in (now.year, now.year + 1, now.year - 1):\n try:\n return self.get_data(API_URL.format(year=year))\n except Exception as e:\n error = e\n raise error\n\n def get_data(self, url):\n data = {\n \"strasse_n\": self._street,\n \"hausnr\": self._hnr,\n \"ical\": \"+iCalendar\",\n \"ladeort\": self._ladeort,\n }\n params = {\"hausnr\": self._hnr}\n\n r = requests.post(url, data=data, params=params)\n dates = self.ics.convert(r.text)\n\n entries = []\n for d in dates:\n date, waste_type = d\n waste_type = waste_type.split(\",\")[0]\n icon = ICON_MAP.get(waste_type)\n entries.append(Collection(date=date, t=waste_type, icon=icon))\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py"}]} | 1,651 | 546 |
gh_patches_debug_10893 | rasdani/github-patches | git_diff | conan-io__conan-4888 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remote lookup in offline mode
Hello,
Following up the small issue that I had during my demo at swampUP:
I was trying to create a package for which I had every build requirement recipe already exported and thus available.
I was expecting to get an error:
>no such package with current options/settings: **build requirement package**
However, it seems that Conan did try to check this package in remotes (with no internet access, it failed). The expected behavior would be for Conan to check the local cache after remotes.
Passing `--build missing` to `conan create` did not fix the issue.
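Roughly the behaviour I was hoping for (illustrative sketch only — these helper names are made up and are not Conan's real internals):

```python
# Sketch of the expected resolution order: local cache first, remotes only
# when we are online. Names like has_package() are hypothetical.
def resolve_binary(pref, cache, remotes, allow_network=True):
    if cache.has_package(pref):            # local cache first
        return "CACHE"
    if not allow_network or not remotes:   # offline: fall through instead of raising
        return "MISSING"                   # caller then builds or reports the error
    for remote in remotes:
        try:
            if remote.has_package(pref):
                return "DOWNLOAD"
        except ConnectionError:            # an unreachable remote should not abort resolution
            continue
    return "MISSING"
```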
Using Conan v1.3.3
- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).
- [x] I've specified the Conan version, operating system version and any tool that can be relevant.
- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.
</issue>
<code>
[start of conans/client/graph/graph_binaries.py]
1 import os
2
3 from conans.client.graph.graph import (BINARY_BUILD, BINARY_CACHE, BINARY_DOWNLOAD, BINARY_MISSING,
4 BINARY_SKIP, BINARY_UPDATE,
5 RECIPE_EDITABLE, BINARY_EDITABLE,
6 RECIPE_CONSUMER, RECIPE_VIRTUAL)
7 from conans.errors import NoRemoteAvailable, NotFoundException,\
8 conanfile_exception_formatter
9 from conans.model.info import ConanInfo
10 from conans.model.manifest import FileTreeManifest
11 from conans.model.ref import PackageReference
12 from conans.util.files import is_dirty, rmdir
13
14
15 class GraphBinariesAnalyzer(object):
16
17 def __init__(self, cache, output, remote_manager):
18 self._cache = cache
19 self._out = output
20 self._remote_manager = remote_manager
21 self._registry = cache.registry
22
23 def _check_update(self, upstream_manifest, package_folder, output, node):
24 read_manifest = FileTreeManifest.load(package_folder)
25 if upstream_manifest != read_manifest:
26 if upstream_manifest.time > read_manifest.time:
27 output.warn("Current package is older than remote upstream one")
28 node.update_manifest = upstream_manifest
29 return True
30 else:
31 output.warn("Current package is newer than remote upstream one")
32
33 def _evaluate_node(self, node, build_mode, update, evaluated_nodes, remote_name):
34 assert node.binary is None, "Node.binary should be None"
35 assert node.package_id is not None, "Node.package_id shouldn't be None"
36
37 ref, conanfile = node.ref, node.conanfile
38 pref = PackageReference(ref, node.package_id)
39
40 # Check that this same reference hasn't already been checked
41 previous_nodes = evaluated_nodes.get(pref)
42 if previous_nodes:
43 previous_nodes.append(node)
44 previous_node = previous_nodes[0]
45 node.binary = previous_node.binary
46 node.binary_remote = previous_node.binary_remote
47 node.prev = previous_node.prev
48 return
49 evaluated_nodes[pref] = [node]
50
51 output = conanfile.output
52
53 if node.recipe == RECIPE_EDITABLE:
54 node.binary = BINARY_EDITABLE
55 # TODO: PREV?
56 return
57
58 if build_mode.forced(conanfile, ref):
59 output.warn('Forced build from source')
60 node.binary = BINARY_BUILD
61 node.prev = None
62 return
63
64 package_folder = self._cache.package(pref, short_paths=conanfile.short_paths)
65
66 # Check if dirty, to remove it
67 with self._cache.package_lock(pref):
68 assert node.recipe != RECIPE_EDITABLE, "Editable package shouldn't reach this code"
69 if is_dirty(package_folder):
70 output.warn("Package is corrupted, removing folder: %s" % package_folder)
71 rmdir(package_folder) # Do not remove if it is EDITABLE
72
73 if self._cache.config.revisions_enabled:
74 metadata = self._cache.package_layout(pref.ref).load_metadata()
75 rec_rev = metadata.packages[pref.id].recipe_revision
76 if rec_rev and rec_rev != node.ref.revision:
77 output.warn("The package {} doesn't belong "
78 "to the installed recipe revision, removing folder".format(pref))
79 rmdir(package_folder)
80
81 if remote_name:
82 remote = self._registry.remotes.get(remote_name)
83 else:
84 # If the remote_name is not given, follow the binary remote, or
85 # the recipe remote
86 # If it is defined it won't iterate (might change in conan2.0)
87 remote = self._registry.prefs.get(pref) or self._registry.refs.get(ref)
88 remotes = self._registry.remotes.list
89
90 if os.path.exists(package_folder):
91 if update:
92 if remote:
93 try:
94 tmp = self._remote_manager.get_package_manifest(pref, remote)
95 upstream_manifest, pref = tmp
96 except NotFoundException:
97 output.warn("Can't update, no package in remote")
98 except NoRemoteAvailable:
99 output.warn("Can't update, no remote defined")
100 else:
101 if self._check_update(upstream_manifest, package_folder, output, node):
102 node.binary = BINARY_UPDATE
103 node.prev = pref.revision # With revision
104 if build_mode.outdated:
105 info, pref = self._remote_manager.get_package_info(pref, remote)
106 package_hash = info.recipe_hash
107 elif remotes:
108 pass
109 else:
110 output.warn("Can't update, no remote defined")
111 if not node.binary:
112 node.binary = BINARY_CACHE
113 metadata = self._cache.package_layout(pref.ref).load_metadata()
114 node.prev = metadata.packages[pref.id].revision
115 package_hash = ConanInfo.load_from_package(package_folder).recipe_hash
116
117 else: # Binary does NOT exist locally
118 remote_info = None
119 if remote:
120 try:
121 remote_info, pref = self._remote_manager.get_package_info(pref, remote)
122 except NotFoundException:
123 pass
124
125 # If the "remote" came from the registry but the user didn't specified the -r, with
126 # revisions iterate all remotes
127 if not remote or (not remote_info and self._cache.config.revisions_enabled
128 and not remote_name):
129 for r in remotes:
130 try:
131 remote_info, pref = self._remote_manager.get_package_info(pref, r)
132 except NotFoundException:
133 pass
134 else:
135 if remote_info:
136 remote = r
137 break
138
139 if remote_info:
140 node.binary = BINARY_DOWNLOAD
141 node.prev = pref.revision
142 package_hash = remote_info.recipe_hash
143 else:
144 if build_mode.allowed(conanfile):
145 node.binary = BINARY_BUILD
146 else:
147 node.binary = BINARY_MISSING
148 node.prev = None
149
150 if build_mode.outdated:
151 if node.binary in (BINARY_CACHE, BINARY_DOWNLOAD, BINARY_UPDATE):
152 local_recipe_hash = self._cache.package_layout(ref).recipe_manifest().summary_hash
153 if local_recipe_hash != package_hash:
154 output.info("Outdated package!")
155 node.binary = BINARY_BUILD
156 node.prev = None
157 else:
158 output.info("Package is up to date")
159
160 node.binary_remote = remote
161
162 @staticmethod
163 def _compute_package_id(node, default_package_id_mode):
164 conanfile = node.conanfile
165 neighbors = node.neighbors()
166 direct_reqs = [] # of PackageReference
167 indirect_reqs = set() # of PackageReference, avoid duplicates
168 for neighbor in neighbors:
169 ref, nconan = neighbor.ref, neighbor.conanfile
170 direct_reqs.append(neighbor.pref)
171 indirect_reqs.update(nconan.info.requires.refs())
172 conanfile.options.propagate_downstream(ref, nconan.info.full_options)
173 # Might be never used, but update original requirement, just in case
174 conanfile.requires[ref.name].ref = ref
175
176 # Make sure not duplicated
177 indirect_reqs.difference_update(direct_reqs)
178 # There might be options that are not upstream, backup them, might be
179 # for build-requires
180 conanfile.build_requires_options = conanfile.options.values
181 conanfile.options.clear_unused(indirect_reqs.union(direct_reqs))
182 conanfile.options.freeze()
183
184 conanfile.info = ConanInfo.create(conanfile.settings.values,
185 conanfile.options.values,
186 direct_reqs,
187 indirect_reqs,
188 default_package_id_mode=default_package_id_mode)
189
190 # Once we are done, call package_id() to narrow and change possible values
191 with conanfile_exception_formatter(str(conanfile), "package_id"):
192 conanfile.package_id()
193
194 info = conanfile.info
195 node.package_id = info.package_id()
196
197 def _handle_private(self, node):
198 if node.binary in (BINARY_CACHE, BINARY_DOWNLOAD, BINARY_UPDATE, BINARY_SKIP):
199 private_neighbours = node.private_neighbors()
200 for neigh in private_neighbours:
201 # Current closure contains own node to be skipped
202 for n in neigh.public_closure.values():
203 n.binary = BINARY_SKIP
204 self._handle_private(n)
205
206 def evaluate_graph(self, deps_graph, build_mode, update, remote_name):
207 default_package_id_mode = self._cache.config.default_package_id_mode
208 evaluated = deps_graph.evaluated
209 for node in deps_graph.ordered_iterate():
210 self._compute_package_id(node, default_package_id_mode)
211 if node.recipe in (RECIPE_CONSUMER, RECIPE_VIRTUAL):
212 continue
213 self._evaluate_node(node, build_mode, update, evaluated, remote_name)
214 self._handle_private(node)
215
[end of conans/client/graph/graph_binaries.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conans/client/graph/graph_binaries.py b/conans/client/graph/graph_binaries.py
--- a/conans/client/graph/graph_binaries.py
+++ b/conans/client/graph/graph_binaries.py
@@ -121,6 +121,9 @@
remote_info, pref = self._remote_manager.get_package_info(pref, remote)
except NotFoundException:
pass
+ except Exception:
+ conanfile.output.error("Error downloading binary package: '{}'".format(pref))
+ raise
# If the "remote" came from the registry but the user didn't specified the -r, with
# revisions iterate all remotes
| {"golden_diff": "diff --git a/conans/client/graph/graph_binaries.py b/conans/client/graph/graph_binaries.py\n--- a/conans/client/graph/graph_binaries.py\n+++ b/conans/client/graph/graph_binaries.py\n@@ -121,6 +121,9 @@\n remote_info, pref = self._remote_manager.get_package_info(pref, remote)\n except NotFoundException:\n pass\n+ except Exception:\n+ conanfile.output.error(\"Error downloading binary package: '{}'\".format(pref))\n+ raise\n \n # If the \"remote\" came from the registry but the user didn't specified the -r, with\n # revisions iterate all remotes\n", "issue": "Remote lookup in offline mode \nHello,\r\n\r\nFollowing up the small issue that I had during my demo at swampUP:\r\n\r\nI was trying to create a package for which I had every build requirement recipe already exported and thus available.\r\n\r\nI was expecting to get an error:\r\n\r\n>no such package with current options/settings: **build requirement package**\r\n\r\nHowever, it seems that Conan did try to check this package in remotes (with no internet access, it failed). The expected behavior would be for Conan to check the local cache after remotes.\r\n\r\nPassing `--build missing` to `conan create` did not fix the issue.\r\n\r\nUsing Conan v1.3.3\r\n\r\n- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).\r\n- [x] I've specified the Conan version, operating system version and any tool that can be relevant.\r\n- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.\r\n\r\n\n", "before_files": [{"content": "import os\n\nfrom conans.client.graph.graph import (BINARY_BUILD, BINARY_CACHE, BINARY_DOWNLOAD, BINARY_MISSING,\n BINARY_SKIP, BINARY_UPDATE,\n RECIPE_EDITABLE, BINARY_EDITABLE,\n RECIPE_CONSUMER, RECIPE_VIRTUAL)\nfrom conans.errors import NoRemoteAvailable, NotFoundException,\\\n conanfile_exception_formatter\nfrom conans.model.info import ConanInfo\nfrom conans.model.manifest import FileTreeManifest\nfrom conans.model.ref import PackageReference\nfrom conans.util.files import is_dirty, rmdir\n\n\nclass GraphBinariesAnalyzer(object):\n\n def __init__(self, cache, output, remote_manager):\n self._cache = cache\n self._out = output\n self._remote_manager = remote_manager\n self._registry = cache.registry\n\n def _check_update(self, upstream_manifest, package_folder, output, node):\n read_manifest = FileTreeManifest.load(package_folder)\n if upstream_manifest != read_manifest:\n if upstream_manifest.time > read_manifest.time:\n output.warn(\"Current package is older than remote upstream one\")\n node.update_manifest = upstream_manifest\n return True\n else:\n output.warn(\"Current package is newer than remote upstream one\")\n\n def _evaluate_node(self, node, build_mode, update, evaluated_nodes, remote_name):\n assert node.binary is None, \"Node.binary should be None\"\n assert node.package_id is not None, \"Node.package_id shouldn't be None\"\n\n ref, conanfile = node.ref, node.conanfile\n pref = PackageReference(ref, node.package_id)\n\n # Check that this same reference hasn't already been checked\n previous_nodes = evaluated_nodes.get(pref)\n if previous_nodes:\n previous_nodes.append(node)\n previous_node = previous_nodes[0]\n node.binary = previous_node.binary\n node.binary_remote = previous_node.binary_remote\n node.prev = previous_node.prev\n return\n evaluated_nodes[pref] = [node]\n\n output = conanfile.output\n\n if node.recipe == RECIPE_EDITABLE:\n node.binary = BINARY_EDITABLE\n # 
TODO: PREV?\n return\n\n if build_mode.forced(conanfile, ref):\n output.warn('Forced build from source')\n node.binary = BINARY_BUILD\n node.prev = None\n return\n\n package_folder = self._cache.package(pref, short_paths=conanfile.short_paths)\n\n # Check if dirty, to remove it\n with self._cache.package_lock(pref):\n assert node.recipe != RECIPE_EDITABLE, \"Editable package shouldn't reach this code\"\n if is_dirty(package_folder):\n output.warn(\"Package is corrupted, removing folder: %s\" % package_folder)\n rmdir(package_folder) # Do not remove if it is EDITABLE\n\n if self._cache.config.revisions_enabled:\n metadata = self._cache.package_layout(pref.ref).load_metadata()\n rec_rev = metadata.packages[pref.id].recipe_revision\n if rec_rev and rec_rev != node.ref.revision:\n output.warn(\"The package {} doesn't belong \"\n \"to the installed recipe revision, removing folder\".format(pref))\n rmdir(package_folder)\n\n if remote_name:\n remote = self._registry.remotes.get(remote_name)\n else:\n # If the remote_name is not given, follow the binary remote, or\n # the recipe remote\n # If it is defined it won't iterate (might change in conan2.0)\n remote = self._registry.prefs.get(pref) or self._registry.refs.get(ref)\n remotes = self._registry.remotes.list\n\n if os.path.exists(package_folder):\n if update:\n if remote:\n try:\n tmp = self._remote_manager.get_package_manifest(pref, remote)\n upstream_manifest, pref = tmp\n except NotFoundException:\n output.warn(\"Can't update, no package in remote\")\n except NoRemoteAvailable:\n output.warn(\"Can't update, no remote defined\")\n else:\n if self._check_update(upstream_manifest, package_folder, output, node):\n node.binary = BINARY_UPDATE\n node.prev = pref.revision # With revision\n if build_mode.outdated:\n info, pref = self._remote_manager.get_package_info(pref, remote)\n package_hash = info.recipe_hash\n elif remotes:\n pass\n else:\n output.warn(\"Can't update, no remote defined\")\n if not node.binary:\n node.binary = BINARY_CACHE\n metadata = self._cache.package_layout(pref.ref).load_metadata()\n node.prev = metadata.packages[pref.id].revision\n package_hash = ConanInfo.load_from_package(package_folder).recipe_hash\n\n else: # Binary does NOT exist locally\n remote_info = None\n if remote:\n try:\n remote_info, pref = self._remote_manager.get_package_info(pref, remote)\n except NotFoundException:\n pass\n\n # If the \"remote\" came from the registry but the user didn't specified the -r, with\n # revisions iterate all remotes\n if not remote or (not remote_info and self._cache.config.revisions_enabled\n and not remote_name):\n for r in remotes:\n try:\n remote_info, pref = self._remote_manager.get_package_info(pref, r)\n except NotFoundException:\n pass\n else:\n if remote_info:\n remote = r\n break\n\n if remote_info:\n node.binary = BINARY_DOWNLOAD\n node.prev = pref.revision\n package_hash = remote_info.recipe_hash\n else:\n if build_mode.allowed(conanfile):\n node.binary = BINARY_BUILD\n else:\n node.binary = BINARY_MISSING\n node.prev = None\n\n if build_mode.outdated:\n if node.binary in (BINARY_CACHE, BINARY_DOWNLOAD, BINARY_UPDATE):\n local_recipe_hash = self._cache.package_layout(ref).recipe_manifest().summary_hash\n if local_recipe_hash != package_hash:\n output.info(\"Outdated package!\")\n node.binary = BINARY_BUILD\n node.prev = None\n else:\n output.info(\"Package is up to date\")\n\n node.binary_remote = remote\n\n @staticmethod\n def _compute_package_id(node, default_package_id_mode):\n conanfile = node.conanfile\n 
neighbors = node.neighbors()\n direct_reqs = [] # of PackageReference\n indirect_reqs = set() # of PackageReference, avoid duplicates\n for neighbor in neighbors:\n ref, nconan = neighbor.ref, neighbor.conanfile\n direct_reqs.append(neighbor.pref)\n indirect_reqs.update(nconan.info.requires.refs())\n conanfile.options.propagate_downstream(ref, nconan.info.full_options)\n # Might be never used, but update original requirement, just in case\n conanfile.requires[ref.name].ref = ref\n\n # Make sure not duplicated\n indirect_reqs.difference_update(direct_reqs)\n # There might be options that are not upstream, backup them, might be\n # for build-requires\n conanfile.build_requires_options = conanfile.options.values\n conanfile.options.clear_unused(indirect_reqs.union(direct_reqs))\n conanfile.options.freeze()\n\n conanfile.info = ConanInfo.create(conanfile.settings.values,\n conanfile.options.values,\n direct_reqs,\n indirect_reqs,\n default_package_id_mode=default_package_id_mode)\n\n # Once we are done, call package_id() to narrow and change possible values\n with conanfile_exception_formatter(str(conanfile), \"package_id\"):\n conanfile.package_id()\n\n info = conanfile.info\n node.package_id = info.package_id()\n\n def _handle_private(self, node):\n if node.binary in (BINARY_CACHE, BINARY_DOWNLOAD, BINARY_UPDATE, BINARY_SKIP):\n private_neighbours = node.private_neighbors()\n for neigh in private_neighbours:\n # Current closure contains own node to be skipped\n for n in neigh.public_closure.values():\n n.binary = BINARY_SKIP\n self._handle_private(n)\n\n def evaluate_graph(self, deps_graph, build_mode, update, remote_name):\n default_package_id_mode = self._cache.config.default_package_id_mode\n evaluated = deps_graph.evaluated\n for node in deps_graph.ordered_iterate():\n self._compute_package_id(node, default_package_id_mode)\n if node.recipe in (RECIPE_CONSUMER, RECIPE_VIRTUAL):\n continue\n self._evaluate_node(node, build_mode, update, evaluated, remote_name)\n self._handle_private(node)\n", "path": "conans/client/graph/graph_binaries.py"}]} | 3,156 | 140 |
gh_patches_debug_28800 | rasdani/github-patches | git_diff | quantumlib__Cirq-1674 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve error message if on_each gets a list
When you do `cirq.H.on_each([q0, q1])` instead of the unpacked version `cirq.H.on_each(q0, q1)` for some qubits `q0` and `q1`, the error message you get is **Gate was called with type different than Qid**.
Maybe either flatten (because most of the time you'll have your qubits in a list or a list of lists), or give a more descriptive error message.
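A minimal sketch of the flattening idea (illustrative only, not necessarily how cirq would implement it):

```python
from collections.abc import Iterable

def flatten_to_ops(gate, *targets):
    """Apply `gate` to each target, accepting qubits, lists, or nested lists."""
    ops = []
    for target in targets:
        if isinstance(target, Iterable) and not isinstance(target, str):
            ops.extend(flatten_to_ops(gate, *target))
        else:
            ops.append(gate.on(target))  # gate.on still validates that target is a Qid
    return ops
```

Either this kind of flattening in `on_each`, or an error message that names the offending type (e.g. `list`), would make the failure much easier to diagnose.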
</issue>
<code>
[start of cirq/ops/gate_features.py]
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Marker classes for indicating which additional features gates support.
16
17 For example: some gates are reversible, some have known matrices, etc.
18 """
19
20 import abc
21
22 from cirq.ops import op_tree, raw_types
23
24
25 class InterchangeableQubitsGate(metaclass=abc.ABCMeta):
26 """Indicates operations should be equal under some qubit permutations."""
27
28 def qubit_index_to_equivalence_group_key(self, index: int) -> int:
29 """Returns a key that differs between non-interchangeable qubits."""
30 return 0
31
32
33 class SingleQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):
34 """A gate that must be applied to exactly one qubit."""
35 def num_qubits(self) -> int:
36 return 1
37
38 def on_each(self, *targets: raw_types.Qid) -> op_tree.OP_TREE:
39 """Returns a list of operations apply this gate to each of the targets.
40
41 Args:
42 *targets: The qubits to apply this gate to.
43
44 Returns:
45 Operations applying this gate to the target qubits.
46
47 Raises:
48 ValueError if targets are not instances of Qid.
49 """
50 return [self.on(target) for target in targets]
51
52
53 class TwoQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):
54 """A gate that must be applied to exactly two qubits."""
55 def num_qubits(self) -> int:
56 return 2
57
58
59 class ThreeQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):
60 """A gate that must be applied to exactly three qubits."""
61 def num_qubits(self) -> int:
62 return 3
63
[end of cirq/ops/gate_features.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cirq/ops/gate_features.py b/cirq/ops/gate_features.py
--- a/cirq/ops/gate_features.py
+++ b/cirq/ops/gate_features.py
@@ -18,8 +18,10 @@
"""
import abc
+import collections
+from typing import Union, Iterable, Any, List
-from cirq.ops import op_tree, raw_types
+from cirq.ops import raw_types
class InterchangeableQubitsGate(metaclass=abc.ABCMeta):
@@ -35,7 +37,8 @@
def num_qubits(self) -> int:
return 1
- def on_each(self, *targets: raw_types.Qid) -> op_tree.OP_TREE:
+ def on_each(self, *targets: Union[raw_types.Qid, Iterable[Any]]
+ ) -> List[raw_types.Operation]:
"""Returns a list of operations apply this gate to each of the targets.
Args:
@@ -45,9 +48,20 @@
Operations applying this gate to the target qubits.
Raises:
- ValueError if targets are not instances of Qid.
+ ValueError if targets are not instances of Qid or List[Qid].
"""
- return [self.on(target) for target in targets]
+ operations = [] # type: List[raw_types.Operation]
+ for target in targets:
+ if isinstance(target,
+ collections.Iterable) and not isinstance(target, str):
+ operations.extend(self.on_each(*target))
+ elif isinstance(target, raw_types.Qid):
+ operations.append(self.on(target))
+ else:
+ raise ValueError(
+ 'Gate was called with type different than Qid. Type: {}'.
+ format(type(target)))
+ return operations
class TwoQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):
| {"golden_diff": "diff --git a/cirq/ops/gate_features.py b/cirq/ops/gate_features.py\n--- a/cirq/ops/gate_features.py\n+++ b/cirq/ops/gate_features.py\n@@ -18,8 +18,10 @@\n \"\"\"\n \n import abc\n+import collections\n+from typing import Union, Iterable, Any, List\n \n-from cirq.ops import op_tree, raw_types\n+from cirq.ops import raw_types\n \n \n class InterchangeableQubitsGate(metaclass=abc.ABCMeta):\n@@ -35,7 +37,8 @@\n def num_qubits(self) -> int:\n return 1\n \n- def on_each(self, *targets: raw_types.Qid) -> op_tree.OP_TREE:\n+ def on_each(self, *targets: Union[raw_types.Qid, Iterable[Any]]\n+ ) -> List[raw_types.Operation]:\n \"\"\"Returns a list of operations apply this gate to each of the targets.\n \n Args:\n@@ -45,9 +48,20 @@\n Operations applying this gate to the target qubits.\n \n Raises:\n- ValueError if targets are not instances of Qid.\n+ ValueError if targets are not instances of Qid or List[Qid].\n \"\"\"\n- return [self.on(target) for target in targets]\n+ operations = [] # type: List[raw_types.Operation]\n+ for target in targets:\n+ if isinstance(target,\n+ collections.Iterable) and not isinstance(target, str):\n+ operations.extend(self.on_each(*target))\n+ elif isinstance(target, raw_types.Qid):\n+ operations.append(self.on(target))\n+ else:\n+ raise ValueError(\n+ 'Gate was called with type different than Qid. Type: {}'.\n+ format(type(target)))\n+ return operations\n \n \n class TwoQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):\n", "issue": "Improve error message if on_each gets a list\nWhen you do `cirq.H.on_each([q0, q1])` instead of the unpacked version `cirq.H.on_each(q0, q1)` for some qubits `q0` and `q1`, the error message you get is **Gate was called with type different than Qid**. \r\n\r\nMaybe either flatten (because most of the time you'll have your qubits in a list or a list of lists), or give a more descriptive error message.\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Marker classes for indicating which additional features gates support.\n\nFor example: some gates are reversible, some have known matrices, etc.\n\"\"\"\n\nimport abc\n\nfrom cirq.ops import op_tree, raw_types\n\n\nclass InterchangeableQubitsGate(metaclass=abc.ABCMeta):\n \"\"\"Indicates operations should be equal under some qubit permutations.\"\"\"\n\n def qubit_index_to_equivalence_group_key(self, index: int) -> int:\n \"\"\"Returns a key that differs between non-interchangeable qubits.\"\"\"\n return 0\n\n\nclass SingleQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):\n \"\"\"A gate that must be applied to exactly one qubit.\"\"\"\n def num_qubits(self) -> int:\n return 1\n\n def on_each(self, *targets: raw_types.Qid) -> op_tree.OP_TREE:\n \"\"\"Returns a list of operations apply this gate to each of the targets.\n\n Args:\n *targets: The qubits to apply this gate to.\n\n Returns:\n Operations applying this gate to the target qubits.\n\n Raises:\n ValueError if targets are not instances of Qid.\n 
\"\"\"\n return [self.on(target) for target in targets]\n\n\nclass TwoQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):\n \"\"\"A gate that must be applied to exactly two qubits.\"\"\"\n def num_qubits(self) -> int:\n return 2\n\n\nclass ThreeQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):\n \"\"\"A gate that must be applied to exactly three qubits.\"\"\"\n def num_qubits(self) -> int:\n return 3\n", "path": "cirq/ops/gate_features.py"}]} | 1,263 | 411 |
gh_patches_debug_39718 | rasdani/github-patches | git_diff | prowler-cloud__prowler-2291 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: backup_plans_exist and backup_reportplans_exist trigger in regions where no backups exist
### Steps to Reproduce
The mentioned checks are triggered even if no backups are present or configured.
### Expected behavior
When the check can't find a resource ID (it actually says "No Backups"), the check shouldn't trigger
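Something along these lines is what I had in mind (illustrative sketch only, reusing the names from the check module shown below — not a proposed final patch):

```python
# Sketch: emit no finding at all when the account has no backup plans,
# instead of a FAIL with resource_id "No Backups".
class backup_plans_exist(Check):
    def execute(self):
        findings = []
        if not backup_client.backup_plans:      # nothing configured -> nothing to report
            return findings
        report = Check_Report_AWS(self.metadata())
        report.status = "PASS"
        report.status_extended = f"At least one backup plan exists: {backup_client.backup_plans[0].name}"
        report.resource_arn = backup_client.backup_plans[0].arn
        report.resource_id = backup_client.backup_plans[0].name
        report.region = backup_client.backup_plans[0].region
        findings.append(report)
        return findings
```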
### Actual Result with Screenshots or Logs

### How did you install Prowler?
From pip package (pip install prowler)
### Environment Resource
Workstation
### OS used
WSL2 under Windows 11
### Prowler version
Prowler 3.4.0 (it is the latest version, yay!)
### Pip version
pip 22.0.2 from /usr/lib/python3/dist-packages/pip (python 3.10)
### Context
_No response_
</issue>
<code>
[start of prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py]
1 from prowler.lib.check.models import Check, Check_Report_AWS
2 from prowler.providers.aws.services.backup.backup_client import backup_client
3
4
5 class backup_plans_exist(Check):
6 def execute(self):
7 findings = []
8 report = Check_Report_AWS(self.metadata())
9 report.status = "FAIL"
10 report.status_extended = "No Backup Plan Exist"
11 report.resource_arn = ""
12 report.resource_id = "No Backups"
13 report.region = backup_client.region
14 if backup_client.backup_plans:
15 report.status = "PASS"
16 report.status_extended = f"At least one backup plan exists: { backup_client.backup_plans[0].name}"
17 report.resource_arn = backup_client.backup_plans[0].arn
18 report.resource_id = backup_client.backup_plans[0].name
19 report.region = backup_client.backup_plans[0].region
20
21 findings.append(report)
22 return findings
23
[end of prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py]
[start of prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py]
1 from prowler.lib.check.models import Check, Check_Report_AWS
2 from prowler.providers.aws.services.backup.backup_client import backup_client
3
4
5 class backup_reportplans_exist(Check):
6 def execute(self):
7 findings = []
8 report = Check_Report_AWS(self.metadata())
9 report.status = "FAIL"
10 report.status_extended = "No Backup Report Plan Exist"
11 report.resource_arn = ""
12 report.resource_id = "No Backups"
13 report.region = backup_client.region
14 if backup_client.backup_report_plans:
15 report.status = "PASS"
16 report.status_extended = f"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}"
17 report.resource_arn = backup_client.backup_report_plans[0].arn
18 report.resource_id = backup_client.backup_report_plans[0].name
19 report.region = backup_client.backup_report_plans[0].region
20
21 findings.append(report)
22 return findings
23
[end of prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py]
[start of prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py]
1 from prowler.lib.check.models import Check, Check_Report_AWS
2 from prowler.providers.aws.services.backup.backup_client import backup_client
3
4
5 class backup_vaults_exist(Check):
6 def execute(self):
7 findings = []
8 report = Check_Report_AWS(self.metadata())
9 report.status = "FAIL"
10 report.status_extended = "No Backup Vault Exist"
11 report.resource_arn = ""
12 report.resource_id = "No Backups"
13 report.region = backup_client.region
14 if backup_client.backup_vaults:
15 report.status = "PASS"
16 report.status_extended = f"At least one backup vault exists: { backup_client.backup_vaults[0].name}"
17 report.resource_arn = backup_client.backup_vaults[0].arn
18 report.resource_id = backup_client.backup_vaults[0].name
19 report.region = backup_client.backup_vaults[0].region
20
21 findings.append(report)
22 return findings
23
[end of prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py b/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py
--- a/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py
+++ b/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py
@@ -9,11 +9,13 @@
report.status = "FAIL"
report.status_extended = "No Backup Plan Exist"
report.resource_arn = ""
- report.resource_id = "No Backups"
+ report.resource_id = "Backups"
report.region = backup_client.region
if backup_client.backup_plans:
report.status = "PASS"
- report.status_extended = f"At least one backup plan exists: { backup_client.backup_plans[0].name}"
+ report.status_extended = (
+ f"At least one backup plan exists: {backup_client.backup_plans[0].name}"
+ )
report.resource_arn = backup_client.backup_plans[0].arn
report.resource_id = backup_client.backup_plans[0].name
report.region = backup_client.backup_plans[0].region
diff --git a/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py b/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py
--- a/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py
+++ b/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py
@@ -5,18 +5,20 @@
class backup_reportplans_exist(Check):
def execute(self):
findings = []
- report = Check_Report_AWS(self.metadata())
- report.status = "FAIL"
- report.status_extended = "No Backup Report Plan Exist"
- report.resource_arn = ""
- report.resource_id = "No Backups"
- report.region = backup_client.region
- if backup_client.backup_report_plans:
- report.status = "PASS"
- report.status_extended = f"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}"
- report.resource_arn = backup_client.backup_report_plans[0].arn
- report.resource_id = backup_client.backup_report_plans[0].name
- report.region = backup_client.backup_report_plans[0].region
+ # We only check report plans if backup plans exist, reducing noise
+ if backup_client.backup_plans:
+ report = Check_Report_AWS(self.metadata())
+ report.status = "FAIL"
+ report.status_extended = "No Backup Report Plan Exist"
+ report.resource_arn = ""
+ report.resource_id = "Backups"
+ report.region = backup_client.region
+ if backup_client.backup_report_plans:
+ report.status = "PASS"
+ report.status_extended = f"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}"
+ report.resource_arn = backup_client.backup_report_plans[0].arn
+ report.resource_id = backup_client.backup_report_plans[0].name
+ report.region = backup_client.backup_report_plans[0].region
- findings.append(report)
+ findings.append(report)
return findings
diff --git a/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py b/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py
--- a/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py
+++ b/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py
@@ -9,7 +9,7 @@
report.status = "FAIL"
report.status_extended = "No Backup Vault Exist"
report.resource_arn = ""
- report.resource_id = "No Backups"
+ report.resource_id = "Backups"
report.region = backup_client.region
if backup_client.backup_vaults:
report.status = "PASS"
| {"golden_diff": "diff --git a/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py b/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py\n--- a/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py\n+++ b/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py\n@@ -9,11 +9,13 @@\n report.status = \"FAIL\"\n report.status_extended = \"No Backup Plan Exist\"\n report.resource_arn = \"\"\n- report.resource_id = \"No Backups\"\n+ report.resource_id = \"Backups\"\n report.region = backup_client.region\n if backup_client.backup_plans:\n report.status = \"PASS\"\n- report.status_extended = f\"At least one backup plan exists: { backup_client.backup_plans[0].name}\"\n+ report.status_extended = (\n+ f\"At least one backup plan exists: {backup_client.backup_plans[0].name}\"\n+ )\n report.resource_arn = backup_client.backup_plans[0].arn\n report.resource_id = backup_client.backup_plans[0].name\n report.region = backup_client.backup_plans[0].region\ndiff --git a/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py b/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py\n--- a/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py\n+++ b/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py\n@@ -5,18 +5,20 @@\n class backup_reportplans_exist(Check):\n def execute(self):\n findings = []\n- report = Check_Report_AWS(self.metadata())\n- report.status = \"FAIL\"\n- report.status_extended = \"No Backup Report Plan Exist\"\n- report.resource_arn = \"\"\n- report.resource_id = \"No Backups\"\n- report.region = backup_client.region\n- if backup_client.backup_report_plans:\n- report.status = \"PASS\"\n- report.status_extended = f\"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}\"\n- report.resource_arn = backup_client.backup_report_plans[0].arn\n- report.resource_id = backup_client.backup_report_plans[0].name\n- report.region = backup_client.backup_report_plans[0].region\n+ # We only check report plans if backup plans exist, reducing noise\n+ if backup_client.backup_plans:\n+ report = Check_Report_AWS(self.metadata())\n+ report.status = \"FAIL\"\n+ report.status_extended = \"No Backup Report Plan Exist\"\n+ report.resource_arn = \"\"\n+ report.resource_id = \"Backups\"\n+ report.region = backup_client.region\n+ if backup_client.backup_report_plans:\n+ report.status = \"PASS\"\n+ report.status_extended = f\"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}\"\n+ report.resource_arn = backup_client.backup_report_plans[0].arn\n+ report.resource_id = backup_client.backup_report_plans[0].name\n+ report.region = backup_client.backup_report_plans[0].region\n \n- findings.append(report)\n+ findings.append(report)\n return findings\ndiff --git a/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py b/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py\n--- a/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py\n+++ b/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py\n@@ -9,7 +9,7 @@\n report.status = \"FAIL\"\n report.status_extended = \"No Backup Vault Exist\"\n report.resource_arn = \"\"\n- report.resource_id = \"No Backups\"\n+ report.resource_id = \"Backups\"\n report.region = 
backup_client.region\n if backup_client.backup_vaults:\n report.status = \"PASS\"\n", "issue": "[Bug]: backup_plans_exist and backup_reportplans_exist trigger in regions where not backups exist\n### Steps to Reproduce\n\nThe mentioned checks are triggered even if no backups are present or configured.\n\n### Expected behavior\n\nWhen the check can't find a resource ID (it actually says \"No Backups\"), the check shouldn't trigger\n\n### Actual Result with Screenshots or Logs\n\n\r\n\n\n### How did you install Prowler?\n\nFrom pip package (pip install prowler)\n\n### Environment Resource\n\nWorkstation\n\n### OS used\n\nWSL2 under Windows 11\n\n### Prowler version\n\nProwler 3.4.0 (it is the latest version, yay!)\n\n### Pip version\n\npip 22.0.2 from /usr/lib/python3/dist-packages/pip (python 3.10)\n\n### Context\n\n_No response_\n", "before_files": [{"content": "from prowler.lib.check.models import Check, Check_Report_AWS\nfrom prowler.providers.aws.services.backup.backup_client import backup_client\n\n\nclass backup_plans_exist(Check):\n def execute(self):\n findings = []\n report = Check_Report_AWS(self.metadata())\n report.status = \"FAIL\"\n report.status_extended = \"No Backup Plan Exist\"\n report.resource_arn = \"\"\n report.resource_id = \"No Backups\"\n report.region = backup_client.region\n if backup_client.backup_plans:\n report.status = \"PASS\"\n report.status_extended = f\"At least one backup plan exists: { backup_client.backup_plans[0].name}\"\n report.resource_arn = backup_client.backup_plans[0].arn\n report.resource_id = backup_client.backup_plans[0].name\n report.region = backup_client.backup_plans[0].region\n\n findings.append(report)\n return findings\n", "path": "prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py"}, {"content": "from prowler.lib.check.models import Check, Check_Report_AWS\nfrom prowler.providers.aws.services.backup.backup_client import backup_client\n\n\nclass backup_reportplans_exist(Check):\n def execute(self):\n findings = []\n report = Check_Report_AWS(self.metadata())\n report.status = \"FAIL\"\n report.status_extended = \"No Backup Report Plan Exist\"\n report.resource_arn = \"\"\n report.resource_id = \"No Backups\"\n report.region = backup_client.region\n if backup_client.backup_report_plans:\n report.status = \"PASS\"\n report.status_extended = f\"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}\"\n report.resource_arn = backup_client.backup_report_plans[0].arn\n report.resource_id = backup_client.backup_report_plans[0].name\n report.region = backup_client.backup_report_plans[0].region\n\n findings.append(report)\n return findings\n", "path": "prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py"}, {"content": "from prowler.lib.check.models import Check, Check_Report_AWS\nfrom prowler.providers.aws.services.backup.backup_client import backup_client\n\n\nclass backup_vaults_exist(Check):\n def execute(self):\n findings = []\n report = Check_Report_AWS(self.metadata())\n report.status = \"FAIL\"\n report.status_extended = \"No Backup Vault Exist\"\n report.resource_arn = \"\"\n report.resource_id = \"No Backups\"\n report.region = backup_client.region\n if backup_client.backup_vaults:\n report.status = \"PASS\"\n report.status_extended = f\"At least one backup vault exists: { backup_client.backup_vaults[0].name}\"\n report.resource_arn = backup_client.backup_vaults[0].arn\n report.resource_id = backup_client.backup_vaults[0].name\n report.region 
= backup_client.backup_vaults[0].region\n\n findings.append(report)\n return findings\n", "path": "prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py"}]} | 1,590 | 915 |
gh_patches_debug_19093 | rasdani/github-patches | git_diff | weecology__retriever-287 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
download command should probably fail when specified path does not exist
A dataset can be downloaded to a specific path with the function `download` while specifying the -p argument. For example, `retriever download MCDB -p my_path` will download each of the MCDB files and then copy them to the directory my_path, but if my_path does not exist, a file called my_path is created and the files overwrite that file as each is copied from the download directory. It may be best if the retriever fails with a warning that the path does not exist.
</issue>
<code>
[start of engines/download_only.py]
1 import os
2 import platform
3 import shutil
4 import inspect
5 from retriever.lib.engine import filename_from_url
6 from retriever.lib.models import Engine, no_cleanup
7 from retriever import DATA_DIR, HOME_DIR
8
9 class DummyConnection:
10 def cursor(self):
11 pass
12 def commit(self):
13 pass
14 def rollback(self):
15 pass
16 def close(self):
17 pass
18
19 class DummyCursor(DummyConnection):
20 pass
21
22
23 class engine(Engine):
24 """Engine instance for writing data to a CSV file."""
25 name = "Download Only"
26 abbreviation = "download"
27 required_opts = [("path",
28 "File path to copy data files",
29 "./"),
30 ]
31
32 def table_exists(self, dbname, tablename):
33 try:
34 tablename = self.table_name(name=tablename, dbname=dbname)
35 return os.path.exists(tablename)
36 except:
37 return False
38
39 def get_connection(self):
40 """Gets the db connection."""
41 self.get_input()
42 return DummyConnection()
43
44 def final_cleanup(self):
45 data_dir = self.format_data_dir()
46 if hasattr(self, "all_files"):
47 for file_name in self.all_files:
48 file_path, file_name_nopath = os.path.split(file_name)
49 if file_path == DATA_DIR:
50 print ("%s is already in the working directory" % file_name_nopath)
51 print("Keeping existing copy.")
52 else:
53 print("Copying %s from %s" % (file_name_nopath, file_path))
54 shutil.copy(file_name, self.opts['path'])
55 self.all_files = set()
56
57 def auto_create_table(self, table, url=None, filename=None, pk=None):
58 if url and not filename:
59 filename = filename_from_url(url)
60
61 if url and not self.find_file(filename):
62 # If the file doesn't exist, download it
63 self.download_file(url, filename)
64
65 def insert_data_from_url(self, url):
66 filename = filename_from_url(url)
67 find = self.find_file(filename)
68 if not find:
69 self.create_raw_data_dir()
70 self.download_file(url, filename)
71
72 def find_file(self, filename):
73 result = Engine.find_file(self, filename)
74 if not hasattr(self, "all_files"): self.all_files = set()
75 if result: self.all_files.add(result)
76 return result
77
78 def register_files(self, filenames):
79 """Identify a list of files to be moved by the download
80
81 When downloading archives with multiple files the engine needs to be
82 informed of all of the file names so that it can move them.
83
84 """
85 full_filenames = {self.find_file(filename) for filename in filenames}
86 self.all_files = self.all_files.union(full_filenames)
87
88
89 # replace all other methods with a function that does nothing
90 def dummy_method(self, *args, **kwargs):
91 pass
92 methods = inspect.getmembers(engine, predicate=inspect.ismethod)
93 keep_methods = {'table_exists',
94 'get_connection',
95 'final_cleanup',
96 'auto_create_table',
97 'insert_data_from_url',
98 }
99 remove_methods = ['insert_data_from_file']
100 for name, method in methods:
101 if (not name in keep_methods
102 and not 'download' in name
103 and not 'file' in name
104 and not 'dir' in name):
105
106 setattr(engine, name, dummy_method)
107 for name in remove_methods:
108 setattr(engine, name, dummy_method)
109
[end of engines/download_only.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/engines/download_only.py b/engines/download_only.py
--- a/engines/download_only.py
+++ b/engines/download_only.py
@@ -51,7 +51,18 @@
print("Keeping existing copy.")
else:
print("Copying %s from %s" % (file_name_nopath, file_path))
- shutil.copy(file_name, self.opts['path'])
+ if os.path.isdir(self.opts['path']):
+ try:
+ shutil.copy(file_name, self.opts['path'])
+ except:
+ print("Couldn't copy file to %s" % self.opts['path'])
+ else:
+ try:
+ print("Creating directory %s" % self.opts['path'])
+ os.mkdir(self.opts['path'])
+ shutil.copy(file_name, self.opts['path'])
+ except:
+ print("Couldn't create directory %s" % self.opts['path'])
self.all_files = set()
def auto_create_table(self, table, url=None, filename=None, pk=None):
| {"golden_diff": "diff --git a/engines/download_only.py b/engines/download_only.py\n--- a/engines/download_only.py\n+++ b/engines/download_only.py\n@@ -51,7 +51,18 @@\n print(\"Keeping existing copy.\")\n else:\n print(\"Copying %s from %s\" % (file_name_nopath, file_path))\n- shutil.copy(file_name, self.opts['path'])\n+ if os.path.isdir(self.opts['path']):\n+ try:\n+ shutil.copy(file_name, self.opts['path'])\n+ except:\n+ print(\"Couldn't copy file to %s\" % self.opts['path'])\n+ else:\n+ try:\n+ print(\"Creating directory %s\" % self.opts['path'])\n+ os.mkdir(self.opts['path'])\n+ shutil.copy(file_name, self.opts['path'])\n+ except:\n+ print(\"Couldn't create directory %s\" % self.opts['path'])\n self.all_files = set()\n \n def auto_create_table(self, table, url=None, filename=None, pk=None):\n", "issue": "download command should probably fail when specified path does not exist\nA datsaet can be downloaded to a specific path with the function `download` while specifying the -p argument. For example `retriever download MCDB -p my_path` will download each of the MCDB files and then copy them to the directory my_path but if my_path does not exist a file called my_path is created and the files overwrite that file as each is copied from the download directory. It may be best if the retriever fails with a warning that the path does not exist. \n\n", "before_files": [{"content": "import os\nimport platform\nimport shutil\nimport inspect\nfrom retriever.lib.engine import filename_from_url\nfrom retriever.lib.models import Engine, no_cleanup\nfrom retriever import DATA_DIR, HOME_DIR\n\nclass DummyConnection:\n def cursor(self):\n pass\n def commit(self):\n pass\n def rollback(self):\n pass\n def close(self):\n pass\n\nclass DummyCursor(DummyConnection):\n pass\n\n\nclass engine(Engine):\n \"\"\"Engine instance for writing data to a CSV file.\"\"\"\n name = \"Download Only\"\n abbreviation = \"download\"\n required_opts = [(\"path\",\n \"File path to copy data files\",\n \"./\"),\n ]\n\n def table_exists(self, dbname, tablename):\n try:\n tablename = self.table_name(name=tablename, dbname=dbname)\n return os.path.exists(tablename)\n except:\n return False\n\n def get_connection(self):\n \"\"\"Gets the db connection.\"\"\"\n self.get_input()\n return DummyConnection()\n\n def final_cleanup(self):\n data_dir = self.format_data_dir()\n if hasattr(self, \"all_files\"):\n for file_name in self.all_files:\n file_path, file_name_nopath = os.path.split(file_name)\n if file_path == DATA_DIR:\n print (\"%s is already in the working directory\" % file_name_nopath)\n print(\"Keeping existing copy.\")\n else:\n print(\"Copying %s from %s\" % (file_name_nopath, file_path))\n shutil.copy(file_name, self.opts['path'])\n self.all_files = set()\n\n def auto_create_table(self, table, url=None, filename=None, pk=None):\n if url and not filename:\n filename = filename_from_url(url)\n\n if url and not self.find_file(filename):\n # If the file doesn't exist, download it\n self.download_file(url, filename)\n\n def insert_data_from_url(self, url):\n filename = filename_from_url(url)\n find = self.find_file(filename)\n if not find:\n self.create_raw_data_dir()\n self.download_file(url, filename)\n\n def find_file(self, filename):\n result = Engine.find_file(self, filename)\n if not hasattr(self, \"all_files\"): self.all_files = set()\n if result: self.all_files.add(result)\n return result\n\n def register_files(self, filenames):\n \"\"\"Identify a list of files to be moved by the download\n\n When downloading archives 
with multiple files the engine needs to be\n informed of all of the file names so that it can move them.\n\n \"\"\"\n full_filenames = {self.find_file(filename) for filename in filenames}\n self.all_files = self.all_files.union(full_filenames)\n\n\n# replace all other methods with a function that does nothing\ndef dummy_method(self, *args, **kwargs):\n pass\nmethods = inspect.getmembers(engine, predicate=inspect.ismethod)\nkeep_methods = {'table_exists',\n 'get_connection',\n 'final_cleanup',\n 'auto_create_table',\n 'insert_data_from_url',\n }\nremove_methods = ['insert_data_from_file']\nfor name, method in methods:\n if (not name in keep_methods\n and not 'download' in name\n and not 'file' in name\n and not 'dir' in name):\n\n setattr(engine, name, dummy_method)\nfor name in remove_methods:\n setattr(engine, name, dummy_method)\n", "path": "engines/download_only.py"}]} | 1,596 | 231 |
gh_patches_debug_12065 | rasdani/github-patches | git_diff | tinygrad__tinygrad-65 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
EOFError: Ran out of input
When running the example and solving the "Can't import fetch from utils" issue, this one comes up:

</issue>
<code>
[start of tinygrad/utils.py]
1 import numpy as np
2
3 def mask_like(like, mask_inx, mask_value = 1.0):
4 mask = np.zeros_like(like).reshape(-1)
5 mask[mask_inx] = mask_value
6 return mask.reshape(like.shape)
7
8 def layer_init_uniform(*x):
9 ret = np.random.uniform(-1., 1., size=x)/np.sqrt(np.prod(x))
10 return ret.astype(np.float32)
11
12 def fetch(url):
13 import requests, os, hashlib, tempfile
14 fp = os.path.join(tempfile.gettempdir(), hashlib.md5(url.encode('utf-8')).hexdigest())
15 if os.path.isfile(fp):
16 with open(fp, "rb") as f:
17 dat = f.read()
18 else:
19 print("fetching %s" % url)
20 with open(fp+".tmp", "wb") as f:
21 dat = requests.get(url).content
22 f.write(dat)
23 os.rename(fp+".tmp", fp)
24 return dat
25
26 def fetch_mnist():
27 import gzip
28 parse = lambda dat: np.frombuffer(gzip.decompress(dat), dtype=np.uint8).copy()
29 X_train = parse(fetch("http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz"))[0x10:].reshape((-1, 28, 28))
30 Y_train = parse(fetch("http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz"))[8:]
31 X_test = parse(fetch("http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz"))[0x10:].reshape((-1, 28, 28))
32 Y_test = parse(fetch("http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz"))[8:]
33 return X_train, Y_train, X_test, Y_test
34
35
[end of tinygrad/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tinygrad/utils.py b/tinygrad/utils.py
--- a/tinygrad/utils.py
+++ b/tinygrad/utils.py
@@ -1,4 +1,5 @@
import numpy as np
+import os
def mask_like(like, mask_inx, mask_value = 1.0):
mask = np.zeros_like(like).reshape(-1)
@@ -12,7 +13,7 @@
def fetch(url):
import requests, os, hashlib, tempfile
fp = os.path.join(tempfile.gettempdir(), hashlib.md5(url.encode('utf-8')).hexdigest())
- if os.path.isfile(fp):
+ if os.path.isfile(fp) and os.stat(fp).st_size > 0:
with open(fp, "rb") as f:
dat = f.read()
else:
| {"golden_diff": "diff --git a/tinygrad/utils.py b/tinygrad/utils.py\n--- a/tinygrad/utils.py\n+++ b/tinygrad/utils.py\n@@ -1,4 +1,5 @@\n import numpy as np\n+import os\n \n def mask_like(like, mask_inx, mask_value = 1.0):\n mask = np.zeros_like(like).reshape(-1)\n@@ -12,7 +13,7 @@\n def fetch(url):\n import requests, os, hashlib, tempfile\n fp = os.path.join(tempfile.gettempdir(), hashlib.md5(url.encode('utf-8')).hexdigest()) \n- if os.path.isfile(fp):\n+ if os.path.isfile(fp) and os.stat(fp).st_size > 0:\n with open(fp, \"rb\") as f:\n dat = f.read()\n else:\n", "issue": "EOFError: Ran out of input\nWhen running example and solving \"Can't import fetch from utils\" issue, this one comes up:\r\n\r\n\r\n\n", "before_files": [{"content": "import numpy as np\n\ndef mask_like(like, mask_inx, mask_value = 1.0):\n mask = np.zeros_like(like).reshape(-1)\n mask[mask_inx] = mask_value\n return mask.reshape(like.shape)\n\ndef layer_init_uniform(*x):\n ret = np.random.uniform(-1., 1., size=x)/np.sqrt(np.prod(x))\n return ret.astype(np.float32)\n\ndef fetch(url):\n import requests, os, hashlib, tempfile\n fp = os.path.join(tempfile.gettempdir(), hashlib.md5(url.encode('utf-8')).hexdigest()) \n if os.path.isfile(fp):\n with open(fp, \"rb\") as f:\n dat = f.read()\n else:\n print(\"fetching %s\" % url)\n with open(fp+\".tmp\", \"wb\") as f:\n dat = requests.get(url).content\n f.write(dat)\n os.rename(fp+\".tmp\", fp)\n return dat\n\ndef fetch_mnist():\n import gzip\n parse = lambda dat: np.frombuffer(gzip.decompress(dat), dtype=np.uint8).copy()\n X_train = parse(fetch(\"http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz\"))[0x10:].reshape((-1, 28, 28))\n Y_train = parse(fetch(\"http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz\"))[8:]\n X_test = parse(fetch(\"http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz\"))[0x10:].reshape((-1, 28, 28))\n Y_test = parse(fetch(\"http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz\"))[8:]\n return X_train, Y_train, X_test, Y_test\n\n", "path": "tinygrad/utils.py"}]} | 1,105 | 176 |
gh_patches_debug_20502 | rasdani/github-patches | git_diff | cloudtools__troposphere-1205 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add 'PermissionsBoundary' properties to AWS::IAM::Role and AWS::IAM::User
This property was released by AWS on November 9.
https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/ReleaseHistory.html
```
PermissionsBoundary
The ARN of the policy that is used to set the permissions boundary for the role. Minimum length of 20. Maximum length of 2048.
Required: No
Type: String
Update requires: No interruption
```
</issue>
<code>
[start of troposphere/iam.py]
1 # Copyright (c) 2012-2013, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6 from . import AWSObject, AWSProperty
7 from .validators import integer, boolean, status
8 from .validators import iam_path, iam_role_name, iam_group_name, iam_user_name
9
10 try:
11 from awacs.aws import Policy
12 policytypes = (dict, Policy)
13 except ImportError:
14 policytypes = dict,
15
16
17 Active = "Active"
18 Inactive = "Inactive"
19
20
21 class AccessKey(AWSObject):
22 resource_type = "AWS::IAM::AccessKey"
23
24 props = {
25 'Serial': (integer, False),
26 'Status': (status, False),
27 'UserName': (basestring, True),
28 }
29
30
31 class PolicyType(AWSObject):
32 resource_type = "AWS::IAM::Policy"
33
34 props = {
35 'Groups': ([basestring], False),
36 'PolicyDocument': (policytypes, True),
37 'PolicyName': (basestring, True),
38 'Roles': ([basestring], False),
39 'Users': ([basestring], False),
40 }
41
42
43 class Policy(AWSProperty):
44 props = {
45 'PolicyDocument': (policytypes, True),
46 'PolicyName': (basestring, True),
47 }
48
49
50 PolicyProperty = Policy
51
52
53 class Group(AWSObject):
54 resource_type = "AWS::IAM::Group"
55
56 props = {
57 'GroupName': (iam_group_name, False),
58 'ManagedPolicyArns': ([basestring], False),
59 'Path': (iam_path, False),
60 'Policies': ([Policy], False),
61 }
62
63
64 class InstanceProfile(AWSObject):
65 resource_type = "AWS::IAM::InstanceProfile"
66
67 props = {
68 'Path': (iam_path, False),
69 'Roles': (list, True),
70 'InstanceProfileName': (basestring, False),
71 }
72
73
74 class Role(AWSObject):
75 resource_type = "AWS::IAM::Role"
76
77 props = {
78 'AssumeRolePolicyDocument': (policytypes, True),
79 'ManagedPolicyArns': ([basestring], False),
80 'MaxSessionDuration': (integer, False),
81 'Path': (iam_path, False),
82 'Policies': ([Policy], False),
83 'RoleName': (iam_role_name, False),
84 }
85
86
87 class ServiceLinkedRole(AWSObject):
88 resource_type = "AWS::IAM::ServiceLinkedRole"
89
90 props = {
91 'AWSServiceName': (basestring, True),
92 'CustomSuffix': (basestring, False),
93 'Description': (basestring, False),
94 }
95
96
97 class LoginProfile(AWSProperty):
98 props = {
99 'Password': (basestring, True),
100 'PasswordResetRequired': (boolean, False),
101 }
102
103
104 class User(AWSObject):
105 resource_type = "AWS::IAM::User"
106
107 props = {
108 'Path': (iam_path, False),
109 'Groups': ([basestring], False),
110 'ManagedPolicyArns': ([basestring], False),
111 'LoginProfile': (LoginProfile, False),
112 'Policies': ([Policy], False),
113 'UserName': (iam_user_name, False),
114 }
115
116
117 class UserToGroupAddition(AWSObject):
118 resource_type = "AWS::IAM::UserToGroupAddition"
119
120 props = {
121 'GroupName': (basestring, True),
122 'Users': (list, True),
123 }
124
125
126 class ManagedPolicy(AWSObject):
127 resource_type = "AWS::IAM::ManagedPolicy"
128
129 props = {
130 'Description': (basestring, False),
131 'Groups': ([basestring], False),
132 'ManagedPolicyName': (basestring, False),
133 'Path': (iam_path, False),
134 'PolicyDocument': (policytypes, True),
135 'Roles': ([basestring], False),
136 'Users': ([basestring], False),
137 }
138
[end of troposphere/iam.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/troposphere/iam.py b/troposphere/iam.py
--- a/troposphere/iam.py
+++ b/troposphere/iam.py
@@ -79,6 +79,7 @@
'ManagedPolicyArns': ([basestring], False),
'MaxSessionDuration': (integer, False),
'Path': (iam_path, False),
+ 'PermissionsBoundary': (basestring, False),
'Policies': ([Policy], False),
'RoleName': (iam_role_name, False),
}
@@ -105,10 +106,11 @@
resource_type = "AWS::IAM::User"
props = {
- 'Path': (iam_path, False),
'Groups': ([basestring], False),
- 'ManagedPolicyArns': ([basestring], False),
'LoginProfile': (LoginProfile, False),
+ 'ManagedPolicyArns': ([basestring], False),
+ 'Path': (iam_path, False),
+ 'PermissionsBoundary': (basestring, False),
'Policies': ([Policy], False),
'UserName': (iam_user_name, False),
}
| {"golden_diff": "diff --git a/troposphere/iam.py b/troposphere/iam.py\n--- a/troposphere/iam.py\n+++ b/troposphere/iam.py\n@@ -79,6 +79,7 @@\n 'ManagedPolicyArns': ([basestring], False),\n 'MaxSessionDuration': (integer, False),\n 'Path': (iam_path, False),\n+ 'PermissionsBoundary': (basestring, False),\n 'Policies': ([Policy], False),\n 'RoleName': (iam_role_name, False),\n }\n@@ -105,10 +106,11 @@\n resource_type = \"AWS::IAM::User\"\n \n props = {\n- 'Path': (iam_path, False),\n 'Groups': ([basestring], False),\n- 'ManagedPolicyArns': ([basestring], False),\n 'LoginProfile': (LoginProfile, False),\n+ 'ManagedPolicyArns': ([basestring], False),\n+ 'Path': (iam_path, False),\n+ 'PermissionsBoundary': (basestring, False),\n 'Policies': ([Policy], False),\n 'UserName': (iam_user_name, False),\n }\n", "issue": "Add 'PermissionsBoundary' properties to AWS::IAM::Role and AWS::IAM::User\nThis property has been released on November 9 by AWS.\r\nhttps://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/ReleaseHistory.html\r\n```\r\nPermissionsBoundary\r\n\r\n The ARN of the policy that is used to set the permissions boundary for the role. Minimum length of 20. Maximum length of 2048.\r\n\r\n Required: No\r\n\r\n Type: String\r\n\r\n Update requires: No interruption\r\n\r\n```\n", "before_files": [{"content": "# Copyright (c) 2012-2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty\nfrom .validators import integer, boolean, status\nfrom .validators import iam_path, iam_role_name, iam_group_name, iam_user_name\n\ntry:\n from awacs.aws import Policy\n policytypes = (dict, Policy)\nexcept ImportError:\n policytypes = dict,\n\n\nActive = \"Active\"\nInactive = \"Inactive\"\n\n\nclass AccessKey(AWSObject):\n resource_type = \"AWS::IAM::AccessKey\"\n\n props = {\n 'Serial': (integer, False),\n 'Status': (status, False),\n 'UserName': (basestring, True),\n }\n\n\nclass PolicyType(AWSObject):\n resource_type = \"AWS::IAM::Policy\"\n\n props = {\n 'Groups': ([basestring], False),\n 'PolicyDocument': (policytypes, True),\n 'PolicyName': (basestring, True),\n 'Roles': ([basestring], False),\n 'Users': ([basestring], False),\n }\n\n\nclass Policy(AWSProperty):\n props = {\n 'PolicyDocument': (policytypes, True),\n 'PolicyName': (basestring, True),\n }\n\n\nPolicyProperty = Policy\n\n\nclass Group(AWSObject):\n resource_type = \"AWS::IAM::Group\"\n\n props = {\n 'GroupName': (iam_group_name, False),\n 'ManagedPolicyArns': ([basestring], False),\n 'Path': (iam_path, False),\n 'Policies': ([Policy], False),\n }\n\n\nclass InstanceProfile(AWSObject):\n resource_type = \"AWS::IAM::InstanceProfile\"\n\n props = {\n 'Path': (iam_path, False),\n 'Roles': (list, True),\n 'InstanceProfileName': (basestring, False),\n }\n\n\nclass Role(AWSObject):\n resource_type = \"AWS::IAM::Role\"\n\n props = {\n 'AssumeRolePolicyDocument': (policytypes, True),\n 'ManagedPolicyArns': ([basestring], False),\n 'MaxSessionDuration': (integer, False),\n 'Path': (iam_path, False),\n 'Policies': ([Policy], False),\n 'RoleName': (iam_role_name, False),\n }\n\n\nclass ServiceLinkedRole(AWSObject):\n resource_type = \"AWS::IAM::ServiceLinkedRole\"\n\n props = {\n 'AWSServiceName': (basestring, True),\n 'CustomSuffix': (basestring, False),\n 'Description': (basestring, False),\n }\n\n\nclass LoginProfile(AWSProperty):\n props = {\n 'Password': (basestring, True),\n 'PasswordResetRequired': (boolean, False),\n }\n\n\nclass User(AWSObject):\n 
resource_type = \"AWS::IAM::User\"\n\n props = {\n 'Path': (iam_path, False),\n 'Groups': ([basestring], False),\n 'ManagedPolicyArns': ([basestring], False),\n 'LoginProfile': (LoginProfile, False),\n 'Policies': ([Policy], False),\n 'UserName': (iam_user_name, False),\n }\n\n\nclass UserToGroupAddition(AWSObject):\n resource_type = \"AWS::IAM::UserToGroupAddition\"\n\n props = {\n 'GroupName': (basestring, True),\n 'Users': (list, True),\n }\n\n\nclass ManagedPolicy(AWSObject):\n resource_type = \"AWS::IAM::ManagedPolicy\"\n\n props = {\n 'Description': (basestring, False),\n 'Groups': ([basestring], False),\n 'ManagedPolicyName': (basestring, False),\n 'Path': (iam_path, False),\n 'PolicyDocument': (policytypes, True),\n 'Roles': ([basestring], False),\n 'Users': ([basestring], False),\n }\n", "path": "troposphere/iam.py"}]} | 1,816 | 256 |
gh_patches_debug_26528 | rasdani/github-patches | git_diff | ESMCI__cime-1048 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing f19_g16_r01 high-resolution river flow grid
Between cime5.2.0-alpha.9 and cime5.2.0-alpha.20 the config_grids file format was changed, and one grid needed for CLM testing was removed. The change to add it back again is as follows...
```
[erik@yslogin4 scripts]$ svn diff ../cime_config/cesm/config_grids.xml
Index: ../cime_config/cesm/config_grids.xml
===================================================================
--- ../cime_config/cesm/config_grids.xml (revision 7095)
+++ ../cime_config/cesm/config_grids.xml (working copy)
@@ -294,6 +294,15 @@
<grid name="ocnice">gx1v6</grid>
</model_grid>
+ <model_grid alias="f19_g16_r01">
+ <grid name="atm">1.9x2.5</grid>
+ <grid name="lnd">1.9x2.5</grid>
+ <grid name="ocnice">gx1v6</grid>
+ <grid name="rof">r01</grid>
+ <mask>gx1v6</mask>
+ </model_grid>
+
+
<model_grid alias="f19_g16_gl4" compset="_CISM">
<grid name="atm">1.9x2.5</grid>
<grid name="lnd">1.9x2.5</grid>
```
@mvertens @jedwards4b @billsacks
</issue>
<code>
[start of utils/python/CIME/BuildTools/configure.py]
1 #!/usr/bin/env python
2
3 """This script writes CIME build information to a directory.
4
5 The pieces of information that will be written include:
6
7 1. Machine-specific build settings (i.e. the "Macros" file).
8 2. File-specific build settings (i.e. "Depends" files).
9 3. Environment variable loads (i.e. the env_mach_specific files).
10
11 The .env_mach_specific.sh and .env_mach_specific.csh files are specific to a
12 given compiler, MPI library, and DEBUG setting. By default, these will be the
13 machine's default compiler, the machine's default MPI library, and FALSE,
14 respectively. These can be changed by setting the environment variables
15 COMPILER, MPILIB, and DEBUG, respectively.
16 """
17
18 import shutil
19 from CIME.XML.standard_module_setup import *
20 from CIME.utils import expect
21 from CIME.XML.compilers import Compilers
22 from CIME.XML.env_mach_specific import EnvMachSpecific
23
24 logger = logging.getLogger(__name__)
25
26 def configure(machobj, output_dir, macros_format, compiler, mpilib, debug, sysos):
27 """Add Macros, Depends, and env_mach_specific files to a directory.
28
29 Arguments:
30 machobj - Machines argument for this machine.
31 output_dir - Directory in which to place output.
32 macros_format - Container containing the string 'Makefile' to produce
33 Makefile Macros output, and/or 'CMake' for CMake output.
34 compiler - String containing the compiler vendor to configure for.
35 mpilib - String containing the MPI implementation to configure for.
36 debug - Boolean specifying whether debugging options are enabled.
37 """
38 # Macros generation.
39 suffixes = {'Makefile': 'make', 'CMake': 'cmake'}
40 macro_maker = Compilers(machobj)
41 for form in macros_format:
42 out_file_name = os.path.join(output_dir,"Macros."+suffixes[form])
43 macro_maker.write_macros_file(macros_file=out_file_name, output_format=suffixes[form])
44
45 _copy_depends_files(machobj.get_machine_name(), machobj.machines_dir, output_dir, compiler)
46 _generate_env_mach_specific(output_dir, machobj, compiler, mpilib,
47 debug, sysos)
48
49 def _copy_depends_files(machine_name, machines_dir, output_dir, compiler):
50 """
51 Copy any system or compiler Depends files if they do not exist in the output directory
52 """
53 for dep in (machine_name, compiler):
54 dfile = os.path.join(machines_dir, "Depends.%s"%dep)
55 outputdfile = os.path.join(output_dir, "Depends.%s"%dep)
56 if os.path.isfile(dfile) and not os.path.isfile(outputdfile):
57 shutil.copyfile(dfile, outputdfile)
58 dfile = os.path.join(machines_dir, "Depends.%s.%s"%(machine_name,compiler))
59 outputdfile = os.path.join(output_dir, "Depends.%s.%s"%(machine_name,compiler))
60 if os.path.isfile(dfile) and not os.path.isfile(outputdfile):
61 shutil.copyfile(dfile, outputdfile)
62
63 def _generate_env_mach_specific(output_dir, machobj, compiler, mpilib, debug, sysos):
64 """
65 env_mach_specific generation.
66 """
67 ems_path = os.path.join(output_dir, "env_mach_specific.xml")
68 if os.path.exists(ems_path):
69 logger.warn("%s already exists, delete to replace"%ems_path)
70 return
71 ems_file = EnvMachSpecific(output_dir)
72 ems_file.populate(machobj)
73 ems_file.write()
74 for shell in ('sh', 'csh'):
75 ems_file.make_env_mach_specific_file(compiler, debug, mpilib, shell)
76 shell_path = os.path.join(output_dir, ".env_mach_specific." + shell)
77 with open(shell_path, 'a') as shell_file:
78 if shell == 'sh':
79 shell_file.write("\nexport COMPILER=%s\n" % compiler)
80 shell_file.write("export MPILIB=%s\n" % mpilib)
81 shell_file.write("export DEBUG=%s\n" % repr(debug).upper())
82 shell_file.write("export OS=%s\n" % sysos)
83 else:
84 shell_file.write("\nsetenv COMPILER %s\n" % compiler)
85 shell_file.write("setenv MPILIB %s\n" % mpilib)
86 shell_file.write("setenv DEBUG %s\n" % repr(debug).upper())
87 shell_file.write("setenv OS %s\n" % sysos)
88
[end of utils/python/CIME/BuildTools/configure.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/utils/python/CIME/BuildTools/configure.py b/utils/python/CIME/BuildTools/configure.py
--- a/utils/python/CIME/BuildTools/configure.py
+++ b/utils/python/CIME/BuildTools/configure.py
@@ -49,16 +49,20 @@
def _copy_depends_files(machine_name, machines_dir, output_dir, compiler):
"""
Copy any system or compiler Depends files if they do not exist in the output directory
+ If there is a match for Depends.machine_name.compiler copy that and ignore the others
"""
- for dep in (machine_name, compiler):
- dfile = os.path.join(machines_dir, "Depends.%s"%dep)
- outputdfile = os.path.join(output_dir, "Depends.%s"%dep)
- if os.path.isfile(dfile) and not os.path.isfile(outputdfile):
- shutil.copyfile(dfile, outputdfile)
dfile = os.path.join(machines_dir, "Depends.%s.%s"%(machine_name,compiler))
outputdfile = os.path.join(output_dir, "Depends.%s.%s"%(machine_name,compiler))
- if os.path.isfile(dfile) and not os.path.isfile(outputdfile):
- shutil.copyfile(dfile, outputdfile)
+ if os.path.isfile(dfile):
+ if not os.path.isfile(outputdfile):
+ shutil.copyfile(dfile, outputdfile)
+ else:
+ for dep in (machine_name, compiler):
+ dfile = os.path.join(machines_dir, "Depends.%s"%dep)
+ outputdfile = os.path.join(output_dir, "Depends.%s"%dep)
+ if os.path.isfile(dfile) and not os.path.isfile(outputdfile):
+ shutil.copyfile(dfile, outputdfile)
+
def _generate_env_mach_specific(output_dir, machobj, compiler, mpilib, debug, sysos):
"""
| {"golden_diff": "diff --git a/utils/python/CIME/BuildTools/configure.py b/utils/python/CIME/BuildTools/configure.py\n--- a/utils/python/CIME/BuildTools/configure.py\n+++ b/utils/python/CIME/BuildTools/configure.py\n@@ -49,16 +49,20 @@\n def _copy_depends_files(machine_name, machines_dir, output_dir, compiler):\n \"\"\"\n Copy any system or compiler Depends files if they do not exist in the output directory\n+ If there is a match for Depends.machine_name.compiler copy that and ignore the others\n \"\"\"\n- for dep in (machine_name, compiler):\n- dfile = os.path.join(machines_dir, \"Depends.%s\"%dep)\n- outputdfile = os.path.join(output_dir, \"Depends.%s\"%dep)\n- if os.path.isfile(dfile) and not os.path.isfile(outputdfile):\n- shutil.copyfile(dfile, outputdfile)\n dfile = os.path.join(machines_dir, \"Depends.%s.%s\"%(machine_name,compiler))\n outputdfile = os.path.join(output_dir, \"Depends.%s.%s\"%(machine_name,compiler))\n- if os.path.isfile(dfile) and not os.path.isfile(outputdfile):\n- shutil.copyfile(dfile, outputdfile)\n+ if os.path.isfile(dfile):\n+ if not os.path.isfile(outputdfile):\n+ shutil.copyfile(dfile, outputdfile)\n+ else:\n+ for dep in (machine_name, compiler):\n+ dfile = os.path.join(machines_dir, \"Depends.%s\"%dep)\n+ outputdfile = os.path.join(output_dir, \"Depends.%s\"%dep)\n+ if os.path.isfile(dfile) and not os.path.isfile(outputdfile):\n+ shutil.copyfile(dfile, outputdfile)\n+\n \n def _generate_env_mach_specific(output_dir, machobj, compiler, mpilib, debug, sysos):\n \"\"\"\n", "issue": "Missing f19_g16_r01 high resolution river flow grid\nBetween cime5.2.0-alpha.9 and cime5.2.0-alpha.20 the config_grids file format was changed, and one grid needed for CLM testing was removed. The change to add it back again is as follows...\r\n\r\n```\r\n[erik@yslogin4 scripts]$ svn diff ../cime_config/cesm/config_grids.xml \r\nIndex: ../cime_config/cesm/config_grids.xml\r\n===================================================================\r\n--- ../cime_config/cesm/config_grids.xml\t(revision 7095)\r\n+++ ../cime_config/cesm/config_grids.xml\t(working copy)\r\n@@ -294,6 +294,15 @@\r\n <grid name=\"ocnice\">gx1v6</grid>\r\n </model_grid>\r\n \r\n+ <model_grid alias=\"f19_g16_r01\">\r\n+ <grid name=\"atm\">1.9x2.5</grid>\r\n+ <grid name=\"lnd\">1.9x2.5</grid>\r\n+ <grid name=\"ocnice\">gx1v6</grid>\r\n+ <grid name=\"rof\">r01</grid>\r\n+ <mask>gx1v6</mask>\r\n+ </model_grid>\r\n+\r\n+\r\n <model_grid alias=\"f19_g16_gl4\" compset=\"_CISM\">\r\n <grid name=\"atm\">1.9x2.5</grid>\r\n <grid name=\"lnd\">1.9x2.5</grid>\r\n\r\n```\r\n@mvertens @jedwards4b @billsacks\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"This script writes CIME build information to a directory.\n\nThe pieces of information that will be written include:\n\n1. Machine-specific build settings (i.e. the \"Macros\" file).\n2. File-specific build settings (i.e. \"Depends\" files).\n3. Environment variable loads (i.e. the env_mach_specific files).\n\nThe .env_mach_specific.sh and .env_mach_specific.csh files are specific to a\ngiven compiler, MPI library, and DEBUG setting. By default, these will be the\nmachine's default compiler, the machine's default MPI library, and FALSE,\nrespectively. 
These can be changed by setting the environment variables\nCOMPILER, MPILIB, and DEBUG, respectively.\n\"\"\"\n\nimport shutil\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.utils import expect\nfrom CIME.XML.compilers import Compilers\nfrom CIME.XML.env_mach_specific import EnvMachSpecific\n\nlogger = logging.getLogger(__name__)\n\ndef configure(machobj, output_dir, macros_format, compiler, mpilib, debug, sysos):\n \"\"\"Add Macros, Depends, and env_mach_specific files to a directory.\n\n Arguments:\n machobj - Machines argument for this machine.\n output_dir - Directory in which to place output.\n macros_format - Container containing the string 'Makefile' to produce\n Makefile Macros output, and/or 'CMake' for CMake output.\n compiler - String containing the compiler vendor to configure for.\n mpilib - String containing the MPI implementation to configure for.\n debug - Boolean specifying whether debugging options are enabled.\n \"\"\"\n # Macros generation.\n suffixes = {'Makefile': 'make', 'CMake': 'cmake'}\n macro_maker = Compilers(machobj)\n for form in macros_format:\n out_file_name = os.path.join(output_dir,\"Macros.\"+suffixes[form])\n macro_maker.write_macros_file(macros_file=out_file_name, output_format=suffixes[form])\n\n _copy_depends_files(machobj.get_machine_name(), machobj.machines_dir, output_dir, compiler)\n _generate_env_mach_specific(output_dir, machobj, compiler, mpilib,\n debug, sysos)\n\ndef _copy_depends_files(machine_name, machines_dir, output_dir, compiler):\n \"\"\"\n Copy any system or compiler Depends files if they do not exist in the output directory\n \"\"\"\n for dep in (machine_name, compiler):\n dfile = os.path.join(machines_dir, \"Depends.%s\"%dep)\n outputdfile = os.path.join(output_dir, \"Depends.%s\"%dep)\n if os.path.isfile(dfile) and not os.path.isfile(outputdfile):\n shutil.copyfile(dfile, outputdfile)\n dfile = os.path.join(machines_dir, \"Depends.%s.%s\"%(machine_name,compiler))\n outputdfile = os.path.join(output_dir, \"Depends.%s.%s\"%(machine_name,compiler))\n if os.path.isfile(dfile) and not os.path.isfile(outputdfile):\n shutil.copyfile(dfile, outputdfile)\n\ndef _generate_env_mach_specific(output_dir, machobj, compiler, mpilib, debug, sysos):\n \"\"\"\n env_mach_specific generation.\n \"\"\"\n ems_path = os.path.join(output_dir, \"env_mach_specific.xml\")\n if os.path.exists(ems_path):\n logger.warn(\"%s already exists, delete to replace\"%ems_path)\n return\n ems_file = EnvMachSpecific(output_dir)\n ems_file.populate(machobj)\n ems_file.write()\n for shell in ('sh', 'csh'):\n ems_file.make_env_mach_specific_file(compiler, debug, mpilib, shell)\n shell_path = os.path.join(output_dir, \".env_mach_specific.\" + shell)\n with open(shell_path, 'a') as shell_file:\n if shell == 'sh':\n shell_file.write(\"\\nexport COMPILER=%s\\n\" % compiler)\n shell_file.write(\"export MPILIB=%s\\n\" % mpilib)\n shell_file.write(\"export DEBUG=%s\\n\" % repr(debug).upper())\n shell_file.write(\"export OS=%s\\n\" % sysos)\n else:\n shell_file.write(\"\\nsetenv COMPILER %s\\n\" % compiler)\n shell_file.write(\"setenv MPILIB %s\\n\" % mpilib)\n shell_file.write(\"setenv DEBUG %s\\n\" % repr(debug).upper())\n shell_file.write(\"setenv OS %s\\n\" % sysos)\n", "path": "utils/python/CIME/BuildTools/configure.py"}]} | 2,068 | 427 |
gh_patches_debug_19916 | rasdani/github-patches | git_diff | weecology__retriever-1121 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add a description field(s) to setup.py
This populates the description on PYPI:
https://packaging.python.org/tutorials/distributing-packages/#description
</issue>
<code>
[start of setup.py]
1 """Use the following command to install retriever: python setup.py install"""
2 from __future__ import absolute_import
3
4 import os
5 import platform
6
7 from pkg_resources import parse_version
8 from setuptools import setup, find_packages
9
10 current_platform = platform.system().lower()
11 extra_includes = []
12 if current_platform == "windows":
13 extra_includes += ["pypyodbc"]
14
15 if os.path.exists(".git/hooks"): # check if we are in git repo
16 os.system("cp hooks/pre-commit .git/hooks/pre-commit")
17 os.system("chmod +x .git/hooks/pre-commit")
18
19 app_data = "~/.retriever/scripts"
20 if os.path.exists(app_data):
21 os.system("rm -r {}".format(app_data))
22
23 __version__ = 'v2.1.dev'
24 with open(os.path.join("retriever", "_version.py"), "w") as version_file:
25 version_file.write("__version__ = " + "'" + __version__ + "'\n")
26 version_file.close()
27
28
29 def clean_version(v):
30 return parse_version(v).__repr__().lstrip("<Version('").rstrip("')>")
31
32 includes = [
33 'xlrd',
34 'future',
35 'argcomplete',
36 'pymysql',
37 'psycopg2',
38 'sqlite3',
39 ] + extra_includes
40
41 excludes = [
42 'pyreadline',
43 'doctest',
44 'pickle',
45 'pdb',
46 'pywin', 'pywin.debugger',
47 'pywin.debugger.dbgcon',
48 'pywin.dialogs', 'pywin.dialogs.list',
49 'Tkconstants', 'Tkinter', 'tcl', 'tk'
50 ]
51
52 setup(name='retriever',
53 version=clean_version(__version__),
54 description='Data Retriever',
55 author='Ben Morris, Shivam Negi, Akash Goel, Andrew Zhang, Henry Senyondo, Ethan White',
56 author_email='[email protected]',
57 url='https://github.com/weecology/retriever',
58 classifiers=['Intended Audience :: Science/Research',
59 'License :: OSI Approved :: MIT License',
60 'Programming Language :: Python',
61 'Programming Language :: Python :: 2',
62 'Programming Language :: Python :: 3', ],
63 packages=find_packages(
64 exclude=['hooks',
65 'docs',
66 'tests',
67 'scripts',
68 'docker',
69 ".cache"]),
70 entry_points={
71 'console_scripts': [
72 'retriever = retriever.__main__:main',
73 ],
74 },
75 install_requires=[
76 'xlrd',
77 'future',
78 'argcomplete',
79 'tqdm'
80 ],
81 data_files=[('', ['CITATION'])],
82 setup_requires=[],
83 )
84
85 # windows doesn't have bash. No point in using bash-completion
86 if current_platform != "windows":
87 # if platform is OS X use "~/.bash_profile"
88 if current_platform == "darwin":
89 bash_file = "~/.bash_profile"
90 # if platform is Linux use "~/.bashrc
91 elif current_platform == "linux":
92 bash_file = "~/.bashrc"
93 # else write and discard
94 else:
95 bash_file = "/dev/null"
96
97 argcomplete_command = 'eval "$(register-python-argcomplete retriever)"'
98 with open(os.path.expanduser(bash_file), "a+") as bashrc:
99 bashrc.seek(0)
100 # register retriever for arg-completion if not already registered
101 # whenever a new shell is spawned
102 if argcomplete_command not in bashrc.read():
103 bashrc.write(argcomplete_command + "\n")
104 bashrc.close()
105 os.system("activate-global-python-argcomplete")
106 # register for the current shell
107 os.system(argcomplete_command)
108
109 try:
110 from retriever.compile import compile
111 from retriever.lib.repository import check_for_updates
112
113 check_for_updates(False)
114 compile()
115 except:
116 pass
117
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -29,6 +29,7 @@
def clean_version(v):
return parse_version(v).__repr__().lstrip("<Version('").rstrip("')>")
+
includes = [
'xlrd',
'future',
@@ -52,6 +53,10 @@
setup(name='retriever',
version=clean_version(__version__),
description='Data Retriever',
+ long_description=('The Data Retriever is a package manager for data. '
+ 'It downloads, cleans, and stores publicly available data, '
+ 'so that analysts spend less time cleaning and managing data, '
+ 'and more time analyzing it.'),
author='Ben Morris, Shivam Negi, Akash Goel, Andrew Zhang, Henry Senyondo, Ethan White',
author_email='[email protected]',
url='https://github.com/weecology/retriever',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -29,6 +29,7 @@\n def clean_version(v):\n return parse_version(v).__repr__().lstrip(\"<Version('\").rstrip(\"')>\")\n \n+\n includes = [\n 'xlrd',\n 'future',\n@@ -52,6 +53,10 @@\n setup(name='retriever',\n version=clean_version(__version__),\n description='Data Retriever',\n+ long_description=('The Data Retriever is a package manager for data. '\n+ 'It downloads, cleans, and stores publicly available data, '\n+ 'so that analysts spend less time cleaning and managing data, '\n+ 'and more time analyzing it.'),\n author='Ben Morris, Shivam Negi, Akash Goel, Andrew Zhang, Henry Senyondo, Ethan White',\n author_email='[email protected]',\n url='https://github.com/weecology/retriever',\n", "issue": "Add a description field(s) to setup.py\nThis populates the description on PYPI:\r\n\r\nhttps://packaging.python.org/tutorials/distributing-packages/#description\n", "before_files": [{"content": "\"\"\"Use the following command to install retriever: python setup.py install\"\"\"\nfrom __future__ import absolute_import\n\nimport os\nimport platform\n\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages\n\ncurrent_platform = platform.system().lower()\nextra_includes = []\nif current_platform == \"windows\":\n extra_includes += [\"pypyodbc\"]\n\nif os.path.exists(\".git/hooks\"): # check if we are in git repo\n os.system(\"cp hooks/pre-commit .git/hooks/pre-commit\")\n os.system(\"chmod +x .git/hooks/pre-commit\")\n\napp_data = \"~/.retriever/scripts\"\nif os.path.exists(app_data):\n os.system(\"rm -r {}\".format(app_data))\n\n__version__ = 'v2.1.dev'\nwith open(os.path.join(\"retriever\", \"_version.py\"), \"w\") as version_file:\n version_file.write(\"__version__ = \" + \"'\" + __version__ + \"'\\n\")\n version_file.close()\n\n\ndef clean_version(v):\n return parse_version(v).__repr__().lstrip(\"<Version('\").rstrip(\"')>\")\n\nincludes = [\n 'xlrd',\n 'future',\n 'argcomplete',\n 'pymysql',\n 'psycopg2',\n 'sqlite3',\n ] + extra_includes\n\nexcludes = [\n 'pyreadline',\n 'doctest',\n 'pickle',\n 'pdb',\n 'pywin', 'pywin.debugger',\n 'pywin.debugger.dbgcon',\n 'pywin.dialogs', 'pywin.dialogs.list',\n 'Tkconstants', 'Tkinter', 'tcl', 'tk'\n]\n\nsetup(name='retriever',\n version=clean_version(__version__),\n description='Data Retriever',\n author='Ben Morris, Shivam Negi, Akash Goel, Andrew Zhang, Henry Senyondo, Ethan White',\n author_email='[email protected]',\n url='https://github.com/weecology/retriever',\n classifiers=['Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3', ],\n packages=find_packages(\n exclude=['hooks',\n 'docs',\n 'tests',\n 'scripts',\n 'docker',\n \".cache\"]),\n entry_points={\n 'console_scripts': [\n 'retriever = retriever.__main__:main',\n ],\n },\n install_requires=[\n 'xlrd',\n 'future',\n 'argcomplete',\n 'tqdm'\n ],\n data_files=[('', ['CITATION'])],\n setup_requires=[],\n )\n\n# windows doesn't have bash. 
No point in using bash-completion\nif current_platform != \"windows\":\n # if platform is OS X use \"~/.bash_profile\"\n if current_platform == \"darwin\":\n bash_file = \"~/.bash_profile\"\n # if platform is Linux use \"~/.bashrc\n elif current_platform == \"linux\":\n bash_file = \"~/.bashrc\"\n # else write and discard\n else:\n bash_file = \"/dev/null\"\n\n argcomplete_command = 'eval \"$(register-python-argcomplete retriever)\"'\n with open(os.path.expanduser(bash_file), \"a+\") as bashrc:\n bashrc.seek(0)\n # register retriever for arg-completion if not already registered\n # whenever a new shell is spawned\n if argcomplete_command not in bashrc.read():\n bashrc.write(argcomplete_command + \"\\n\")\n bashrc.close()\n os.system(\"activate-global-python-argcomplete\")\n # register for the current shell\n os.system(argcomplete_command)\n\ntry:\n from retriever.compile import compile\n from retriever.lib.repository import check_for_updates\n\n check_for_updates(False)\n compile()\nexcept:\n pass\n", "path": "setup.py"}]} | 1,645 | 217 |
gh_patches_debug_37237 | rasdani/github-patches | git_diff | Parsl__parsl-1075 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Condor provider has a hard-coded `cmd_timeout`.
This timeout should be configurable and probably default to something much longer.
https://github.com/Parsl/parsl/blob/1d8c9e35752274af6ae2ce2f07107474ece4561c/parsl/providers/condor/condor.py#L225
cc @ZhuozhaoLi who noted this in a comment to #889
</issue>
<code>
[start of parsl/providers/condor/condor.py]
1 import logging
2 import os
3 import re
4 import time
5
6 from parsl.channels import LocalChannel
7 from parsl.utils import RepresentationMixin
8 from parsl.launchers import SingleNodeLauncher
9 from parsl.providers.condor.template import template_string
10 from parsl.providers.cluster_provider import ClusterProvider
11
12 logger = logging.getLogger(__name__)
13
14 # See http://pages.cs.wisc.edu/~adesmet/status.html
15 translate_table = {
16 '1': 'PENDING',
17 '2': 'RUNNING',
18 '3': 'CANCELLED',
19 '4': 'COMPLETED',
20 '5': 'FAILED',
21 '6': 'FAILED',
22 }
23
24
25 class CondorProvider(RepresentationMixin, ClusterProvider):
26 """HTCondor Execution Provider.
27
28 Parameters
29 ----------
30 channel : Channel
31 Channel for accessing this provider. Possible channels include
32 :class:`~parsl.channels.LocalChannel` (the default),
33 :class:`~parsl.channels.SSHChannel`, or
34 :class:`~parsl.channels.SSHInteractiveLoginChannel`.
35 nodes_per_block : int
36 Nodes to provision per block.
37 init_blocks : int
38 Number of blocks to provision at time of initialization
39 min_blocks : int
40 Minimum number of blocks to maintain
41 max_blocks : int
42 Maximum number of blocks to maintain.
43 parallelism : float
44 Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive
45 scaling where as many resources as possible are used; parallelism close to 0 represents
46 the opposite situation in which as few resources as possible (i.e., min_blocks) are used.
47 environment : dict of str
48 A dictionary of environmant variable name and value pairs which will be set before
49 running a task.
50 project : str
51 Project which the job will be charged against
52 scheduler_options : str
53 String to add specific condor attributes to the HTCondor submit script.
54 transfer_input_files : list(str)
55 List of strings of paths to additional files or directories to transfer to the job
56 worker_init : str
57 Command to be run before starting a worker.
58 requirements : str
59 Condor requirements.
60 launcher : Launcher
61 Launcher for this provider. Possible launchers include
62 :class:`~parsl.launchers.SingleNodeLauncher` (the default),
63 """
64 def __init__(self,
65 channel=LocalChannel(),
66 nodes_per_block=1,
67 init_blocks=1,
68 min_blocks=0,
69 max_blocks=10,
70 parallelism=1,
71 environment=None,
72 project='',
73 scheduler_options='',
74 transfer_input_files=[],
75 walltime="00:10:00",
76 worker_init='',
77 launcher=SingleNodeLauncher(),
78 requirements=''):
79
80 label = 'condor'
81 super().__init__(label,
82 channel,
83 nodes_per_block,
84 init_blocks,
85 min_blocks,
86 max_blocks,
87 parallelism,
88 walltime,
89 launcher)
90
91 self.provisioned_blocks = 0
92
93 self.environment = environment if environment is not None else {}
94 for key, value in self.environment.items():
95 # To escape literal quote marks, double them
96 # See: http://research.cs.wisc.edu/htcondor/manual/v8.6/condor_submit.html
97 try:
98 self.environment[key] = "'{}'".format(value.replace("'", '"').replace('"', '""'))
99 except AttributeError:
100 pass
101
102 self.project = project
103 self.scheduler_options = scheduler_options
104 self.worker_init = worker_init
105 self.requirements = requirements
106 self.transfer_input_files = transfer_input_files
107
108 def _status(self):
109 """Update the resource dictionary with job statuses."""
110
111 job_id_list = ' '.join(self.resources.keys())
112 cmd = "condor_q {0} -af:jr JobStatus".format(job_id_list)
113 retcode, stdout, stderr = super().execute_wait(cmd)
114 """
115 Example output:
116
117 $ condor_q 34524642.0 34524643.0 -af:jr JobStatus
118 34524642.0 2
119 34524643.0 1
120 """
121
122 for line in stdout.strip().split('\n'):
123 parts = line.split()
124 job_id = parts[0]
125 status = translate_table.get(parts[1], 'UNKNOWN')
126 self.resources[job_id]['status'] = status
127
128 def status(self, job_ids):
129 """Get the status of a list of jobs identified by their ids.
130
131 Parameters
132 ----------
133 job_ids : list of int
134 Identifiers of jobs for which the status will be returned.
135
136 Returns
137 -------
138 List of int
139 Status codes for the requested jobs.
140
141 """
142 self._status()
143 return [self.resources[jid]['status'] for jid in job_ids]
144
145 def submit(self, command, blocksize, tasks_per_node, job_name="parsl.auto"):
146 """Submits the command onto an Local Resource Manager job of blocksize parallel elements.
147
148 example file with the complex case of multiple submits per job:
149 Universe =vanilla
150 output = out.$(Cluster).$(Process)
151 error = err.$(Cluster).$(Process)
152 log = log.$(Cluster)
153 leave_in_queue = true
154 executable = test.sh
155 queue 5
156 executable = foo
157 queue 1
158
159 $ condor_submit test.sub
160 Submitting job(s)......
161 5 job(s) submitted to cluster 118907.
162 1 job(s) submitted to cluster 118908.
163
164 Parameters
165 ----------
166 command : str
167 Command to execute
168 blocksize : int
169 Number of blocks to request.
170 job_name : str
171 Job name prefix.
172 tasks_per_node : int
173 command invocations to be launched per node
174 Returns
175 -------
176 None or str
177 None if at capacity and cannot provision more; otherwise the identifier for the job.
178 """
179
180 logger.debug("Attempting to launch with blocksize: {}".format(blocksize))
181 if self.provisioned_blocks >= self.max_blocks:
182 template = "Provider {} is currently using {} blocks while max_blocks is {}; no blocks will be added"
183 logger.warn(template.format(self.label, self.provisioned_blocks, self.max_blocks))
184 return None
185
186 # Note: Fix this later to avoid confusing behavior.
187 # We should always allocate blocks in integer counts of node_granularity
188 blocksize = max(self.nodes_per_block, blocksize)
189
190 job_name = "parsl.{0}.{1}".format(job_name, time.time())
191
192 script_path = "{0}/{1}.submit".format(self.script_dir, job_name)
193 script_path = os.path.abspath(script_path)
194 userscript_path = "{0}/{1}.script".format(self.script_dir, job_name)
195 userscript_path = os.path.abspath(userscript_path)
196
197 self.environment["JOBNAME"] = "'{}'".format(job_name)
198
199 job_config = {}
200 job_config["job_name"] = job_name
201 job_config["submit_script_dir"] = self.channel.script_dir
202 job_config["project"] = self.project
203 job_config["nodes"] = self.nodes_per_block
204 job_config["scheduler_options"] = self.scheduler_options
205 job_config["worker_init"] = self.worker_init
206 job_config["user_script"] = command
207 job_config["tasks_per_node"] = tasks_per_node
208 job_config["requirements"] = self.requirements
209 job_config["environment"] = ' '.join(['{}={}'.format(key, value) for key, value in self.environment.items()])
210
211 # Move the user script
212 # This is where the command should be wrapped by the launchers.
213 wrapped_command = self.launcher(command,
214 tasks_per_node,
215 self.nodes_per_block)
216
217 with open(userscript_path, 'w') as f:
218 f.write(job_config["worker_init"] + '\n' + wrapped_command)
219
220 user_script_path = self.channel.push_file(userscript_path, self.channel.script_dir)
221 the_input_files = [user_script_path] + self.transfer_input_files
222 job_config["input_files"] = ','.join(the_input_files)
223 job_config["job_script"] = os.path.basename(user_script_path)
224
225 # Construct and move the submit script
226 self._write_submit_script(template_string, script_path, job_name, job_config)
227 channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)
228
229 cmd = "condor_submit {0}".format(channel_script_path)
230 retcode, stdout, stderr = super().execute_wait(cmd, 30)
231 logger.debug("Retcode:%s STDOUT:%s STDERR:%s", retcode, stdout.strip(), stderr.strip())
232
233 job_id = []
234
235 if retcode == 0:
236 for line in stdout.split('\n'):
237 if re.match('^[0-9]', line) is not None:
238 cluster = line.split(" ")[5]
239 # We know the first job id ("process" in condor terms) within a
240 # cluster is 0 and we know the total number of jobs from
241 # condor_submit, so we use some list comprehensions to expand
242 # the condor_submit output into job IDs
243 # e.g., ['118907.0', '118907.1', '118907.2', '118907.3', '118907.4', '118908.0']
244 processes = [str(x) for x in range(0, int(line[0]))]
245 job_id += [cluster + process for process in processes]
246
247 self._add_resource(job_id)
248 return job_id[0]
249
250 def cancel(self, job_ids):
251 """Cancels the jobs specified by a list of job IDs.
252
253 Parameters
254 ----------
255 job_ids : list of str
256 The job IDs to cancel.
257
258 Returns
259 -------
260 list of bool
261             Each entry in the list will be True if the job is cancelled successfully, otherwise False.
262 """
263
264 job_id_list = ' '.join(job_ids)
265 cmd = "condor_rm {0}; condor_rm -forcex {0}".format(job_id_list)
266 logger.debug("Attempting removal of jobs : {0}".format(cmd))
267 retcode, stdout, stderr = self.channel.execute_wait(cmd, 30)
268 rets = None
269 if retcode == 0:
270 for jid in job_ids:
271 self.resources[jid]['status'] = 'CANCELLED'
272 rets = [True for i in job_ids]
273 else:
274 rets = [False for i in job_ids]
275
276 return rets
277
278 @property
279 def scaling_enabled(self):
280 return True
281
282 @property
283 def current_capacity(self):
284 return self
285
286 def _add_resource(self, job_id):
287 for jid in job_id:
288 self.resources[jid] = {'status': 'PENDING', 'size': 1}
289 return True
290
291
292 if __name__ == "__main__":
293
294 print("None")
295
[end of parsl/providers/condor/condor.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsl/providers/condor/condor.py b/parsl/providers/condor/condor.py
--- a/parsl/providers/condor/condor.py
+++ b/parsl/providers/condor/condor.py
@@ -60,6 +60,8 @@
launcher : Launcher
Launcher for this provider. Possible launchers include
:class:`~parsl.launchers.SingleNodeLauncher` (the default),
+ cmd_timeout : int
+ Timeout for commands made to the scheduler in seconds
"""
def __init__(self,
channel=LocalChannel(),
@@ -75,7 +77,8 @@
walltime="00:10:00",
worker_init='',
launcher=SingleNodeLauncher(),
- requirements=''):
+ requirements='',
+ cmd_timeout=60):
label = 'condor'
super().__init__(label,
@@ -86,8 +89,8 @@
max_blocks,
parallelism,
walltime,
- launcher)
-
+ launcher,
+ cmd_timeout=cmd_timeout)
self.provisioned_blocks = 0
self.environment = environment if environment is not None else {}
@@ -227,7 +230,7 @@
channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)
cmd = "condor_submit {0}".format(channel_script_path)
- retcode, stdout, stderr = super().execute_wait(cmd, 30)
+ retcode, stdout, stderr = super().execute_wait(cmd)
logger.debug("Retcode:%s STDOUT:%s STDERR:%s", retcode, stdout.strip(), stderr.strip())
job_id = []
@@ -264,7 +267,7 @@
job_id_list = ' '.join(job_ids)
cmd = "condor_rm {0}; condor_rm -forcex {0}".format(job_id_list)
logger.debug("Attempting removal of jobs : {0}".format(cmd))
- retcode, stdout, stderr = self.channel.execute_wait(cmd, 30)
+ retcode, stdout, stderr = super().execute_wait(cmd)
rets = None
if retcode == 0:
for jid in job_ids:
| {"golden_diff": "diff --git a/parsl/providers/condor/condor.py b/parsl/providers/condor/condor.py\n--- a/parsl/providers/condor/condor.py\n+++ b/parsl/providers/condor/condor.py\n@@ -60,6 +60,8 @@\n launcher : Launcher\n Launcher for this provider. Possible launchers include\n :class:`~parsl.launchers.SingleNodeLauncher` (the default),\n+ cmd_timeout : int\n+ Timeout for commands made to the scheduler in seconds\n \"\"\"\n def __init__(self,\n channel=LocalChannel(),\n@@ -75,7 +77,8 @@\n walltime=\"00:10:00\",\n worker_init='',\n launcher=SingleNodeLauncher(),\n- requirements=''):\n+ requirements='',\n+ cmd_timeout=60):\n \n label = 'condor'\n super().__init__(label,\n@@ -86,8 +89,8 @@\n max_blocks,\n parallelism,\n walltime,\n- launcher)\n-\n+ launcher,\n+ cmd_timeout=cmd_timeout)\n self.provisioned_blocks = 0\n \n self.environment = environment if environment is not None else {}\n@@ -227,7 +230,7 @@\n channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)\n \n cmd = \"condor_submit {0}\".format(channel_script_path)\n- retcode, stdout, stderr = super().execute_wait(cmd, 30)\n+ retcode, stdout, stderr = super().execute_wait(cmd)\n logger.debug(\"Retcode:%s STDOUT:%s STDERR:%s\", retcode, stdout.strip(), stderr.strip())\n \n job_id = []\n@@ -264,7 +267,7 @@\n job_id_list = ' '.join(job_ids)\n cmd = \"condor_rm {0}; condor_rm -forcex {0}\".format(job_id_list)\n logger.debug(\"Attempting removal of jobs : {0}\".format(cmd))\n- retcode, stdout, stderr = self.channel.execute_wait(cmd, 30)\n+ retcode, stdout, stderr = super().execute_wait(cmd)\n rets = None\n if retcode == 0:\n for jid in job_ids:\n", "issue": "Condor provider has a hard-coded `cmd_timeout`. \nThis timeout should be configurable and probably default to something much longer.\r\n\r\nhttps://github.com/Parsl/parsl/blob/1d8c9e35752274af6ae2ce2f07107474ece4561c/parsl/providers/condor/condor.py#L225\r\n\r\ncc @ZhuozhaoLi who noted this in a comment to #889 \n", "before_files": [{"content": "import logging\nimport os\nimport re\nimport time\n\nfrom parsl.channels import LocalChannel\nfrom parsl.utils import RepresentationMixin\nfrom parsl.launchers import SingleNodeLauncher\nfrom parsl.providers.condor.template import template_string\nfrom parsl.providers.cluster_provider import ClusterProvider\n\nlogger = logging.getLogger(__name__)\n\n# See http://pages.cs.wisc.edu/~adesmet/status.html\ntranslate_table = {\n '1': 'PENDING',\n '2': 'RUNNING',\n '3': 'CANCELLED',\n '4': 'COMPLETED',\n '5': 'FAILED',\n '6': 'FAILED',\n}\n\n\nclass CondorProvider(RepresentationMixin, ClusterProvider):\n \"\"\"HTCondor Execution Provider.\n\n Parameters\n ----------\n channel : Channel\n Channel for accessing this provider. Possible channels include\n :class:`~parsl.channels.LocalChannel` (the default),\n :class:`~parsl.channels.SSHChannel`, or\n :class:`~parsl.channels.SSHInteractiveLoginChannel`.\n nodes_per_block : int\n Nodes to provision per block.\n init_blocks : int\n Number of blocks to provision at time of initialization\n min_blocks : int\n Minimum number of blocks to maintain\n max_blocks : int\n Maximum number of blocks to maintain.\n parallelism : float\n Ratio of provisioned task slots to active tasks. 
A parallelism value of 1 represents aggressive\n scaling where as many resources as possible are used; parallelism close to 0 represents\n the opposite situation in which as few resources as possible (i.e., min_blocks) are used.\n environment : dict of str\n A dictionary of environmant variable name and value pairs which will be set before\n running a task.\n project : str\n Project which the job will be charged against\n scheduler_options : str\n String to add specific condor attributes to the HTCondor submit script.\n transfer_input_files : list(str)\n List of strings of paths to additional files or directories to transfer to the job\n worker_init : str\n Command to be run before starting a worker.\n requirements : str\n Condor requirements.\n launcher : Launcher\n Launcher for this provider. Possible launchers include\n :class:`~parsl.launchers.SingleNodeLauncher` (the default),\n \"\"\"\n def __init__(self,\n channel=LocalChannel(),\n nodes_per_block=1,\n init_blocks=1,\n min_blocks=0,\n max_blocks=10,\n parallelism=1,\n environment=None,\n project='',\n scheduler_options='',\n transfer_input_files=[],\n walltime=\"00:10:00\",\n worker_init='',\n launcher=SingleNodeLauncher(),\n requirements=''):\n\n label = 'condor'\n super().__init__(label,\n channel,\n nodes_per_block,\n init_blocks,\n min_blocks,\n max_blocks,\n parallelism,\n walltime,\n launcher)\n\n self.provisioned_blocks = 0\n\n self.environment = environment if environment is not None else {}\n for key, value in self.environment.items():\n # To escape literal quote marks, double them\n # See: http://research.cs.wisc.edu/htcondor/manual/v8.6/condor_submit.html\n try:\n self.environment[key] = \"'{}'\".format(value.replace(\"'\", '\"').replace('\"', '\"\"'))\n except AttributeError:\n pass\n\n self.project = project\n self.scheduler_options = scheduler_options\n self.worker_init = worker_init\n self.requirements = requirements\n self.transfer_input_files = transfer_input_files\n\n def _status(self):\n \"\"\"Update the resource dictionary with job statuses.\"\"\"\n\n job_id_list = ' '.join(self.resources.keys())\n cmd = \"condor_q {0} -af:jr JobStatus\".format(job_id_list)\n retcode, stdout, stderr = super().execute_wait(cmd)\n \"\"\"\n Example output:\n\n $ condor_q 34524642.0 34524643.0 -af:jr JobStatus\n 34524642.0 2\n 34524643.0 1\n \"\"\"\n\n for line in stdout.strip().split('\\n'):\n parts = line.split()\n job_id = parts[0]\n status = translate_table.get(parts[1], 'UNKNOWN')\n self.resources[job_id]['status'] = status\n\n def status(self, job_ids):\n \"\"\"Get the status of a list of jobs identified by their ids.\n\n Parameters\n ----------\n job_ids : list of int\n Identifiers of jobs for which the status will be returned.\n\n Returns\n -------\n List of int\n Status codes for the requested jobs.\n\n \"\"\"\n self._status()\n return [self.resources[jid]['status'] for jid in job_ids]\n\n def submit(self, command, blocksize, tasks_per_node, job_name=\"parsl.auto\"):\n \"\"\"Submits the command onto an Local Resource Manager job of blocksize parallel elements.\n\n example file with the complex case of multiple submits per job:\n Universe =vanilla\n output = out.$(Cluster).$(Process)\n error = err.$(Cluster).$(Process)\n log = log.$(Cluster)\n leave_in_queue = true\n executable = test.sh\n queue 5\n executable = foo\n queue 1\n\n $ condor_submit test.sub\n Submitting job(s)......\n 5 job(s) submitted to cluster 118907.\n 1 job(s) submitted to cluster 118908.\n\n Parameters\n ----------\n command : str\n Command to execute\n 
blocksize : int\n Number of blocks to request.\n job_name : str\n Job name prefix.\n tasks_per_node : int\n command invocations to be launched per node\n Returns\n -------\n None or str\n None if at capacity and cannot provision more; otherwise the identifier for the job.\n \"\"\"\n\n logger.debug(\"Attempting to launch with blocksize: {}\".format(blocksize))\n if self.provisioned_blocks >= self.max_blocks:\n template = \"Provider {} is currently using {} blocks while max_blocks is {}; no blocks will be added\"\n logger.warn(template.format(self.label, self.provisioned_blocks, self.max_blocks))\n return None\n\n # Note: Fix this later to avoid confusing behavior.\n # We should always allocate blocks in integer counts of node_granularity\n blocksize = max(self.nodes_per_block, blocksize)\n\n job_name = \"parsl.{0}.{1}\".format(job_name, time.time())\n\n script_path = \"{0}/{1}.submit\".format(self.script_dir, job_name)\n script_path = os.path.abspath(script_path)\n userscript_path = \"{0}/{1}.script\".format(self.script_dir, job_name)\n userscript_path = os.path.abspath(userscript_path)\n\n self.environment[\"JOBNAME\"] = \"'{}'\".format(job_name)\n\n job_config = {}\n job_config[\"job_name\"] = job_name\n job_config[\"submit_script_dir\"] = self.channel.script_dir\n job_config[\"project\"] = self.project\n job_config[\"nodes\"] = self.nodes_per_block\n job_config[\"scheduler_options\"] = self.scheduler_options\n job_config[\"worker_init\"] = self.worker_init\n job_config[\"user_script\"] = command\n job_config[\"tasks_per_node\"] = tasks_per_node\n job_config[\"requirements\"] = self.requirements\n job_config[\"environment\"] = ' '.join(['{}={}'.format(key, value) for key, value in self.environment.items()])\n\n # Move the user script\n # This is where the command should be wrapped by the launchers.\n wrapped_command = self.launcher(command,\n tasks_per_node,\n self.nodes_per_block)\n\n with open(userscript_path, 'w') as f:\n f.write(job_config[\"worker_init\"] + '\\n' + wrapped_command)\n\n user_script_path = self.channel.push_file(userscript_path, self.channel.script_dir)\n the_input_files = [user_script_path] + self.transfer_input_files\n job_config[\"input_files\"] = ','.join(the_input_files)\n job_config[\"job_script\"] = os.path.basename(user_script_path)\n\n # Construct and move the submit script\n self._write_submit_script(template_string, script_path, job_name, job_config)\n channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)\n\n cmd = \"condor_submit {0}\".format(channel_script_path)\n retcode, stdout, stderr = super().execute_wait(cmd, 30)\n logger.debug(\"Retcode:%s STDOUT:%s STDERR:%s\", retcode, stdout.strip(), stderr.strip())\n\n job_id = []\n\n if retcode == 0:\n for line in stdout.split('\\n'):\n if re.match('^[0-9]', line) is not None:\n cluster = line.split(\" \")[5]\n # We know the first job id (\"process\" in condor terms) within a\n # cluster is 0 and we know the total number of jobs from\n # condor_submit, so we use some list comprehensions to expand\n # the condor_submit output into job IDs\n # e.g., ['118907.0', '118907.1', '118907.2', '118907.3', '118907.4', '118908.0']\n processes = [str(x) for x in range(0, int(line[0]))]\n job_id += [cluster + process for process in processes]\n\n self._add_resource(job_id)\n return job_id[0]\n\n def cancel(self, job_ids):\n \"\"\"Cancels the jobs specified by a list of job IDs.\n\n Parameters\n ----------\n job_ids : list of str\n The job IDs to cancel.\n\n Returns\n -------\n list of bool\n Each 
entry in the list will be True if the job is cancelled succesfully, otherwise False.\n \"\"\"\n\n job_id_list = ' '.join(job_ids)\n cmd = \"condor_rm {0}; condor_rm -forcex {0}\".format(job_id_list)\n logger.debug(\"Attempting removal of jobs : {0}\".format(cmd))\n retcode, stdout, stderr = self.channel.execute_wait(cmd, 30)\n rets = None\n if retcode == 0:\n for jid in job_ids:\n self.resources[jid]['status'] = 'CANCELLED'\n rets = [True for i in job_ids]\n else:\n rets = [False for i in job_ids]\n\n return rets\n\n @property\n def scaling_enabled(self):\n return True\n\n @property\n def current_capacity(self):\n return self\n\n def _add_resource(self, job_id):\n for jid in job_id:\n self.resources[jid] = {'status': 'PENDING', 'size': 1}\n return True\n\n\nif __name__ == \"__main__\":\n\n print(\"None\")\n", "path": "parsl/providers/condor/condor.py"}]} | 3,865 | 494 |
gh_patches_debug_51313 | rasdani/github-patches | git_diff | scikit-image__scikit-image-5128 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
filters.farid missing from skimage.filters documentation
## Description
The `filters.farid{,_h,_v}` functions are missing from the [`skimage.filters` documentation](https://scikit-image.org/docs/dev/api/skimage.filters.html). I presume this is because they are not in `__all__`? (No time to investigate right now.)
</issue>
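A quick way to check that presumption, assuming the scikit-image version described in this issue is installed, is to inspect the module directly: `farid` is importable from `skimage.filters` but does not appear in its `__all__` list, as the listing below also shows.

```python
# Minimal check of the presumption above (assumes the scikit-image version
# described in this issue is installed).
from skimage import filters

print(hasattr(filters, "farid"))   # True: the function is imported in the package
print("farid" in filters.__all__)  # False here, which is why the docs skip it
```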
<code>
[start of skimage/filters/__init__.py]
1 from .lpi_filter import inverse, wiener, LPIFilter2D
2 from ._gaussian import (gaussian, _guess_spatial_dimensions,
3 difference_of_gaussians)
4 from .edges import (sobel, sobel_h, sobel_v,
5 scharr, scharr_h, scharr_v,
6 prewitt, prewitt_h, prewitt_v,
7 roberts, roberts_pos_diag, roberts_neg_diag,
8 laplace,
9 farid, farid_h, farid_v)
10 from ._rank_order import rank_order
11 from ._gabor import gabor_kernel, gabor
12 from .thresholding import (threshold_local, threshold_otsu, threshold_yen,
13 threshold_isodata, threshold_li, threshold_minimum,
14 threshold_mean, threshold_triangle,
15 threshold_niblack, threshold_sauvola,
16 threshold_multiotsu, try_all_threshold,
17 apply_hysteresis_threshold)
18 from .ridges import (meijering, sato, frangi, hessian)
19 from . import rank
20 from ._median import median
21 from ._sparse import correlate_sparse
22 from ._unsharp_mask import unsharp_mask
23 from ._window import window
24
25
26 __all__ = ['inverse',
27 'correlate_sparse',
28 'wiener',
29 'LPIFilter2D',
30 'gaussian',
31 'difference_of_gaussians',
32 'median',
33 'sobel',
34 'sobel_h',
35 'sobel_v',
36 'scharr',
37 'scharr_h',
38 'scharr_v',
39 'prewitt',
40 'prewitt_h',
41 'prewitt_v',
42 'roberts',
43 'roberts_pos_diag',
44 'roberts_neg_diag',
45 'laplace',
46 'rank_order',
47 'gabor_kernel',
48 'gabor',
49 'try_all_threshold',
50 'meijering',
51 'sato',
52 'frangi',
53 'hessian',
54 'threshold_otsu',
55 'threshold_yen',
56 'threshold_isodata',
57 'threshold_li',
58 'threshold_local',
59 'threshold_minimum',
60 'threshold_mean',
61 'threshold_niblack',
62 'threshold_sauvola',
63 'threshold_triangle',
64 'threshold_multiotsu',
65 'apply_hysteresis_threshold',
66 'rank',
67 'unsharp_mask',
68 'window']
69
[end of skimage/filters/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/skimage/filters/__init__.py b/skimage/filters/__init__.py
--- a/skimage/filters/__init__.py
+++ b/skimage/filters/__init__.py
@@ -43,6 +43,9 @@
'roberts_pos_diag',
'roberts_neg_diag',
'laplace',
+ 'farid',
+ 'farid_h',
+ 'farid_v',
'rank_order',
'gabor_kernel',
'gabor',
| {"golden_diff": "diff --git a/skimage/filters/__init__.py b/skimage/filters/__init__.py\n--- a/skimage/filters/__init__.py\n+++ b/skimage/filters/__init__.py\n@@ -43,6 +43,9 @@\n 'roberts_pos_diag',\n 'roberts_neg_diag',\n 'laplace',\n+ 'farid',\n+ 'farid_h',\n+ 'farid_v',\n 'rank_order',\n 'gabor_kernel',\n 'gabor',\n", "issue": "filters.farid missing from skimage.filters documentation\n## Description\r\n\r\nThe `filters.farid{,_h,_v}` functions are missing from the [`skimage.filters` documentation](https://scikit-image.org/docs/dev/api/skimage.filters.html). I presume this is because they are not it `__all__`? (No time to investigate right now.)\n", "before_files": [{"content": "from .lpi_filter import inverse, wiener, LPIFilter2D\nfrom ._gaussian import (gaussian, _guess_spatial_dimensions,\n difference_of_gaussians)\nfrom .edges import (sobel, sobel_h, sobel_v,\n scharr, scharr_h, scharr_v,\n prewitt, prewitt_h, prewitt_v,\n roberts, roberts_pos_diag, roberts_neg_diag,\n laplace,\n farid, farid_h, farid_v)\nfrom ._rank_order import rank_order\nfrom ._gabor import gabor_kernel, gabor\nfrom .thresholding import (threshold_local, threshold_otsu, threshold_yen,\n threshold_isodata, threshold_li, threshold_minimum,\n threshold_mean, threshold_triangle,\n threshold_niblack, threshold_sauvola,\n threshold_multiotsu, try_all_threshold,\n apply_hysteresis_threshold)\nfrom .ridges import (meijering, sato, frangi, hessian)\nfrom . import rank\nfrom ._median import median\nfrom ._sparse import correlate_sparse\nfrom ._unsharp_mask import unsharp_mask\nfrom ._window import window\n\n\n__all__ = ['inverse',\n 'correlate_sparse',\n 'wiener',\n 'LPIFilter2D',\n 'gaussian',\n 'difference_of_gaussians',\n 'median',\n 'sobel',\n 'sobel_h',\n 'sobel_v',\n 'scharr',\n 'scharr_h',\n 'scharr_v',\n 'prewitt',\n 'prewitt_h',\n 'prewitt_v',\n 'roberts',\n 'roberts_pos_diag',\n 'roberts_neg_diag',\n 'laplace',\n 'rank_order',\n 'gabor_kernel',\n 'gabor',\n 'try_all_threshold',\n 'meijering',\n 'sato',\n 'frangi',\n 'hessian',\n 'threshold_otsu',\n 'threshold_yen',\n 'threshold_isodata',\n 'threshold_li',\n 'threshold_local',\n 'threshold_minimum',\n 'threshold_mean',\n 'threshold_niblack',\n 'threshold_sauvola',\n 'threshold_triangle',\n 'threshold_multiotsu',\n 'apply_hysteresis_threshold',\n 'rank',\n 'unsharp_mask',\n 'window']\n", "path": "skimage/filters/__init__.py"}]} | 1,255 | 117 |
gh_patches_debug_43025 | rasdani/github-patches | git_diff | azavea__raster-vision-641 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Include per-scene metrics in eval.json
It would be useful to see metrics for each scene in addition to metrics averaged over all scenes.
</issue>
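To make the request concrete, one possible shape for eval.json is sketched below: the existing aggregate list is kept under one key and a per-scene breakdown is keyed by scene id. The key names and scene ids are illustrative, not a committed format.

```python
# Illustrative only: one possible eval.json layout that keeps the current
# aggregate metrics while adding a breakdown keyed by scene id.
eval_json = {
    "overall": [...],        # the existing list of per-class items plus the average
    "per_scene": {
        "scene-1": [...],    # same structure, computed for a single scene
        "scene-2": [...],
    },
}
```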
<code>
[start of rastervision/evaluation/classification_evaluation.py]
1 from abc import (ABC, abstractmethod)
2
3 import json
4
5 from rastervision.evaluation import ClassEvaluationItem
6 from rastervision.utils.files import str_to_file
7
8
9 class ClassificationEvaluation(ABC):
10 """Base class for evaluating predictions for tasks that have classes.
11
12 Evaluations can be keyed, for instance, if evaluations happen per class.
13 """
14
15 def __init__(self):
16 self.clear()
17
18 def clear(self):
19 """Clear the Evaluation."""
20 self.class_to_eval_item = {}
21 self.avg_item = None
22
23 def set_class_to_eval_item(self, class_to_eval_item):
24 self.class_to_eval_item = class_to_eval_item
25
26 def get_by_id(self, key):
27 """Gets the evaluation for a particular EvaluationItem key"""
28 return self.class_to_eval_item[key]
29
30 def has_id(self, key):
31 """Answers whether or not the EvaluationItem key is represented"""
32 return key in self.class_to_eval_item
33
34 def to_json(self):
35 json_rep = []
36 for eval_item in self.class_to_eval_item.values():
37 json_rep.append(eval_item.to_json())
38 json_rep.append(self.avg_item.to_json())
39 return json_rep
40
41 def save(self, output_uri):
42 """Save this Evaluation to a file.
43
44 Args:
45 output_uri: string URI for the file to write.
46 """
47 json_str = json.dumps(self.to_json(), indent=4)
48 str_to_file(json_str, output_uri)
49
50 def merge(self, evaluation):
51 """Merge Evaluation for another Scene into this one.
52
53 This is useful for computing the average metrics of a set of scenes.
54 The results of the averaging are stored in this Evaluation.
55
56 Args:
57 evaluation: Evaluation to merge into this one
58 """
59 if len(self.class_to_eval_item) == 0:
60 self.class_to_eval_item = evaluation.class_to_eval_item
61 else:
62 for key, other_eval_item in \
63 evaluation.class_to_eval_item.items():
64 if self.has_id(key):
65 self.get_by_id(key).merge(other_eval_item)
66 else:
67 self.class_to_eval_item[key] = other_eval_item
68
69 self.compute_avg()
70
71 def compute_avg(self):
72 """Compute average metrics over all keys."""
73 self.avg_item = ClassEvaluationItem(class_name='average')
74 for eval_item in self.class_to_eval_item.values():
75 self.avg_item.merge(eval_item)
76
77 @abstractmethod
78 def compute(self, ground_truth_labels, prediction_labels):
79 """Compute metrics for a single scene.
80
81 Args:
82 ground_truth_labels: Ground Truth labels to evaluate against.
83 prediction_labels: The predicted labels to evaluate.
84 """
85 pass
86
[end of rastervision/evaluation/classification_evaluation.py]
[start of rastervision/evaluation/semantic_segmentation_evaluator.py]
1 import logging
2
3 from rastervision.data import ActivateMixin
4 from rastervision.rv_config import RVConfig
5 from rastervision.utils.files import (download_if_needed)
6 from rastervision.evaluation import (ClassificationEvaluator,
7 SemanticSegmentationEvaluation)
8
9 log = logging.getLogger(__name__)
10
11
12 class SemanticSegmentationEvaluator(ClassificationEvaluator):
13 """Evaluates predictions for a set of scenes.
14 """
15
16 def __init__(self, class_map, output_uri):
17 super().__init__(class_map, output_uri)
18
19 def create_evaluation(self):
20 return SemanticSegmentationEvaluation(self.class_map)
21
22 def process(self, scenes, tmp_dir):
23 evaluation = self.create_evaluation()
24 for scene in scenes:
25 log.info('Computing evaluation for scene {}...'.format(scene.id))
26 label_source = scene.ground_truth_label_source
27 label_store = scene.prediction_label_store
28 with ActivateMixin.compose(label_source, label_store):
29 ground_truth = label_source.get_labels()
30 predictions = label_store.get_labels()
31
32 if scene.aoi_polygons:
33 # Filter labels based on AOI.
34 ground_truth = ground_truth.filter_by_aoi(
35 scene.aoi_polygons)
36 predictions = predictions.filter_by_aoi(scene.aoi_polygons)
37 scene_evaluation = self.create_evaluation()
38 scene_evaluation.compute(ground_truth, predictions)
39 evaluation.merge(scene_evaluation)
40
41 if hasattr(label_source, 'source') and hasattr(
42 label_source.source, 'vector_source') and hasattr(
43 label_store, 'vector_output'):
44 tmp_dir = RVConfig.get_tmp_dir().name
45 gt_geojson = label_source.source.vector_source.get_geojson()
46 for vo in label_store.vector_output:
47 pred_geojson = vo['uri']
48 mode = vo['mode']
49 class_id = vo['class_id']
50 pred_geojson_local = download_if_needed(
51 pred_geojson, tmp_dir)
52 scene_evaluation = self.create_evaluation()
53 scene_evaluation.compute_vector(
54 gt_geojson, pred_geojson_local, mode, class_id)
55 evaluation.merge(scene_evaluation)
56
57 evaluation.save(self.output_uri)
58
[end of rastervision/evaluation/semantic_segmentation_evaluator.py]
[start of rastervision/evaluation/classification_evaluator.py]
1 from abc import (abstractmethod)
2 import logging
3
4 from rastervision.evaluation import Evaluator
5 from rastervision.data import ActivateMixin
6
7 log = logging.getLogger(__name__)
8
9
10 class ClassificationEvaluator(Evaluator):
11 """Evaluates predictions for a set of scenes.
12 """
13
14 def __init__(self, class_map, output_uri):
15 self.class_map = class_map
16 self.output_uri = output_uri
17
18 @abstractmethod
19 def create_evaluation(self):
20 pass
21
22 def process(self, scenes, tmp_dir):
23 evaluation = self.create_evaluation()
24 for scene in scenes:
25 log.info('Computing evaluation for scene {}...'.format(scene.id))
26 label_source = scene.ground_truth_label_source
27 label_store = scene.prediction_label_store
28 with ActivateMixin.compose(label_source, label_store):
29 ground_truth = label_source.get_labels()
30 predictions = label_store.get_labels()
31
32 if scene.aoi_polygons:
33 # Filter labels based on AOI.
34 ground_truth = ground_truth.filter_by_aoi(
35 scene.aoi_polygons)
36 predictions = predictions.filter_by_aoi(scene.aoi_polygons)
37 scene_evaluation = self.create_evaluation()
38 scene_evaluation.compute(ground_truth, predictions)
39 evaluation.merge(scene_evaluation)
40
41 evaluation.save(self.output_uri)
42
[end of rastervision/evaluation/classification_evaluator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rastervision/evaluation/classification_evaluation.py b/rastervision/evaluation/classification_evaluation.py
--- a/rastervision/evaluation/classification_evaluation.py
+++ b/rastervision/evaluation/classification_evaluation.py
@@ -1,4 +1,5 @@
from abc import (ABC, abstractmethod)
+import copy
import json
@@ -18,6 +19,7 @@
def clear(self):
"""Clear the Evaluation."""
self.class_to_eval_item = {}
+ self.scene_to_eval = {}
self.avg_item = None
def set_class_to_eval_item(self, class_to_eval_item):
@@ -36,6 +38,14 @@
for eval_item in self.class_to_eval_item.values():
json_rep.append(eval_item.to_json())
json_rep.append(self.avg_item.to_json())
+
+ if self.scene_to_eval:
+ json_rep = {'overall': json_rep}
+ scene_to_eval_json = {}
+ for scene_id, eval in self.scene_to_eval.items():
+ scene_to_eval_json[scene_id] = eval.to_json()
+ json_rep['per_scene'] = scene_to_eval_json
+
return json_rep
def save(self, output_uri):
@@ -47,7 +57,7 @@
json_str = json.dumps(self.to_json(), indent=4)
str_to_file(json_str, output_uri)
- def merge(self, evaluation):
+ def merge(self, evaluation, scene_id=None):
"""Merge Evaluation for another Scene into this one.
This is useful for computing the average metrics of a set of scenes.
@@ -68,6 +78,9 @@
self.compute_avg()
+ if scene_id is not None:
+ self.scene_to_eval[scene_id] = copy.deepcopy(evaluation)
+
def compute_avg(self):
"""Compute average metrics over all keys."""
self.avg_item = ClassEvaluationItem(class_name='average')
diff --git a/rastervision/evaluation/classification_evaluator.py b/rastervision/evaluation/classification_evaluator.py
--- a/rastervision/evaluation/classification_evaluator.py
+++ b/rastervision/evaluation/classification_evaluator.py
@@ -36,6 +36,5 @@
predictions = predictions.filter_by_aoi(scene.aoi_polygons)
scene_evaluation = self.create_evaluation()
scene_evaluation.compute(ground_truth, predictions)
- evaluation.merge(scene_evaluation)
-
+ evaluation.merge(scene_evaluation, scene_id=scene.id)
evaluation.save(self.output_uri)
diff --git a/rastervision/evaluation/semantic_segmentation_evaluator.py b/rastervision/evaluation/semantic_segmentation_evaluator.py
--- a/rastervision/evaluation/semantic_segmentation_evaluator.py
+++ b/rastervision/evaluation/semantic_segmentation_evaluator.py
@@ -36,7 +36,7 @@
predictions = predictions.filter_by_aoi(scene.aoi_polygons)
scene_evaluation = self.create_evaluation()
scene_evaluation.compute(ground_truth, predictions)
- evaluation.merge(scene_evaluation)
+ evaluation.merge(scene_evaluation, scene_id=scene.id)
if hasattr(label_source, 'source') and hasattr(
label_source.source, 'vector_source') and hasattr(
@@ -52,6 +52,6 @@
scene_evaluation = self.create_evaluation()
scene_evaluation.compute_vector(
gt_geojson, pred_geojson_local, mode, class_id)
- evaluation.merge(scene_evaluation)
+ evaluation.merge(scene_evaluation, scene_id=scene.id)
evaluation.save(self.output_uri)
| {"golden_diff": "diff --git a/rastervision/evaluation/classification_evaluation.py b/rastervision/evaluation/classification_evaluation.py\n--- a/rastervision/evaluation/classification_evaluation.py\n+++ b/rastervision/evaluation/classification_evaluation.py\n@@ -1,4 +1,5 @@\n from abc import (ABC, abstractmethod)\n+import copy\n \n import json\n \n@@ -18,6 +19,7 @@\n def clear(self):\n \"\"\"Clear the Evaluation.\"\"\"\n self.class_to_eval_item = {}\n+ self.scene_to_eval = {}\n self.avg_item = None\n \n def set_class_to_eval_item(self, class_to_eval_item):\n@@ -36,6 +38,14 @@\n for eval_item in self.class_to_eval_item.values():\n json_rep.append(eval_item.to_json())\n json_rep.append(self.avg_item.to_json())\n+\n+ if self.scene_to_eval:\n+ json_rep = {'overall': json_rep}\n+ scene_to_eval_json = {}\n+ for scene_id, eval in self.scene_to_eval.items():\n+ scene_to_eval_json[scene_id] = eval.to_json()\n+ json_rep['per_scene'] = scene_to_eval_json\n+\n return json_rep\n \n def save(self, output_uri):\n@@ -47,7 +57,7 @@\n json_str = json.dumps(self.to_json(), indent=4)\n str_to_file(json_str, output_uri)\n \n- def merge(self, evaluation):\n+ def merge(self, evaluation, scene_id=None):\n \"\"\"Merge Evaluation for another Scene into this one.\n \n This is useful for computing the average metrics of a set of scenes.\n@@ -68,6 +78,9 @@\n \n self.compute_avg()\n \n+ if scene_id is not None:\n+ self.scene_to_eval[scene_id] = copy.deepcopy(evaluation)\n+\n def compute_avg(self):\n \"\"\"Compute average metrics over all keys.\"\"\"\n self.avg_item = ClassEvaluationItem(class_name='average')\ndiff --git a/rastervision/evaluation/classification_evaluator.py b/rastervision/evaluation/classification_evaluator.py\n--- a/rastervision/evaluation/classification_evaluator.py\n+++ b/rastervision/evaluation/classification_evaluator.py\n@@ -36,6 +36,5 @@\n predictions = predictions.filter_by_aoi(scene.aoi_polygons)\n scene_evaluation = self.create_evaluation()\n scene_evaluation.compute(ground_truth, predictions)\n- evaluation.merge(scene_evaluation)\n-\n+ evaluation.merge(scene_evaluation, scene_id=scene.id)\n evaluation.save(self.output_uri)\ndiff --git a/rastervision/evaluation/semantic_segmentation_evaluator.py b/rastervision/evaluation/semantic_segmentation_evaluator.py\n--- a/rastervision/evaluation/semantic_segmentation_evaluator.py\n+++ b/rastervision/evaluation/semantic_segmentation_evaluator.py\n@@ -36,7 +36,7 @@\n predictions = predictions.filter_by_aoi(scene.aoi_polygons)\n scene_evaluation = self.create_evaluation()\n scene_evaluation.compute(ground_truth, predictions)\n- evaluation.merge(scene_evaluation)\n+ evaluation.merge(scene_evaluation, scene_id=scene.id)\n \n if hasattr(label_source, 'source') and hasattr(\n label_source.source, 'vector_source') and hasattr(\n@@ -52,6 +52,6 @@\n scene_evaluation = self.create_evaluation()\n scene_evaluation.compute_vector(\n gt_geojson, pred_geojson_local, mode, class_id)\n- evaluation.merge(scene_evaluation)\n+ evaluation.merge(scene_evaluation, scene_id=scene.id)\n \n evaluation.save(self.output_uri)\n", "issue": "Include per-scene metrics in eval.json\nIt would be useful to see metrics for each scene in addition to metrics averaged over all scenes. 
\n", "before_files": [{"content": "from abc import (ABC, abstractmethod)\n\nimport json\n\nfrom rastervision.evaluation import ClassEvaluationItem\nfrom rastervision.utils.files import str_to_file\n\n\nclass ClassificationEvaluation(ABC):\n \"\"\"Base class for evaluating predictions for tasks that have classes.\n\n Evaluations can be keyed, for instance, if evaluations happen per class.\n \"\"\"\n\n def __init__(self):\n self.clear()\n\n def clear(self):\n \"\"\"Clear the Evaluation.\"\"\"\n self.class_to_eval_item = {}\n self.avg_item = None\n\n def set_class_to_eval_item(self, class_to_eval_item):\n self.class_to_eval_item = class_to_eval_item\n\n def get_by_id(self, key):\n \"\"\"Gets the evaluation for a particular EvaluationItem key\"\"\"\n return self.class_to_eval_item[key]\n\n def has_id(self, key):\n \"\"\"Answers whether or not the EvaluationItem key is represented\"\"\"\n return key in self.class_to_eval_item\n\n def to_json(self):\n json_rep = []\n for eval_item in self.class_to_eval_item.values():\n json_rep.append(eval_item.to_json())\n json_rep.append(self.avg_item.to_json())\n return json_rep\n\n def save(self, output_uri):\n \"\"\"Save this Evaluation to a file.\n\n Args:\n output_uri: string URI for the file to write.\n \"\"\"\n json_str = json.dumps(self.to_json(), indent=4)\n str_to_file(json_str, output_uri)\n\n def merge(self, evaluation):\n \"\"\"Merge Evaluation for another Scene into this one.\n\n This is useful for computing the average metrics of a set of scenes.\n The results of the averaging are stored in this Evaluation.\n\n Args:\n evaluation: Evaluation to merge into this one\n \"\"\"\n if len(self.class_to_eval_item) == 0:\n self.class_to_eval_item = evaluation.class_to_eval_item\n else:\n for key, other_eval_item in \\\n evaluation.class_to_eval_item.items():\n if self.has_id(key):\n self.get_by_id(key).merge(other_eval_item)\n else:\n self.class_to_eval_item[key] = other_eval_item\n\n self.compute_avg()\n\n def compute_avg(self):\n \"\"\"Compute average metrics over all keys.\"\"\"\n self.avg_item = ClassEvaluationItem(class_name='average')\n for eval_item in self.class_to_eval_item.values():\n self.avg_item.merge(eval_item)\n\n @abstractmethod\n def compute(self, ground_truth_labels, prediction_labels):\n \"\"\"Compute metrics for a single scene.\n\n Args:\n ground_truth_labels: Ground Truth labels to evaluate against.\n prediction_labels: The predicted labels to evaluate.\n \"\"\"\n pass\n", "path": "rastervision/evaluation/classification_evaluation.py"}, {"content": "import logging\n\nfrom rastervision.data import ActivateMixin\nfrom rastervision.rv_config import RVConfig\nfrom rastervision.utils.files import (download_if_needed)\nfrom rastervision.evaluation import (ClassificationEvaluator,\n SemanticSegmentationEvaluation)\n\nlog = logging.getLogger(__name__)\n\n\nclass SemanticSegmentationEvaluator(ClassificationEvaluator):\n \"\"\"Evaluates predictions for a set of scenes.\n \"\"\"\n\n def __init__(self, class_map, output_uri):\n super().__init__(class_map, output_uri)\n\n def create_evaluation(self):\n return SemanticSegmentationEvaluation(self.class_map)\n\n def process(self, scenes, tmp_dir):\n evaluation = self.create_evaluation()\n for scene in scenes:\n log.info('Computing evaluation for scene {}...'.format(scene.id))\n label_source = scene.ground_truth_label_source\n label_store = scene.prediction_label_store\n with ActivateMixin.compose(label_source, label_store):\n ground_truth = label_source.get_labels()\n predictions = 
label_store.get_labels()\n\n if scene.aoi_polygons:\n # Filter labels based on AOI.\n ground_truth = ground_truth.filter_by_aoi(\n scene.aoi_polygons)\n predictions = predictions.filter_by_aoi(scene.aoi_polygons)\n scene_evaluation = self.create_evaluation()\n scene_evaluation.compute(ground_truth, predictions)\n evaluation.merge(scene_evaluation)\n\n if hasattr(label_source, 'source') and hasattr(\n label_source.source, 'vector_source') and hasattr(\n label_store, 'vector_output'):\n tmp_dir = RVConfig.get_tmp_dir().name\n gt_geojson = label_source.source.vector_source.get_geojson()\n for vo in label_store.vector_output:\n pred_geojson = vo['uri']\n mode = vo['mode']\n class_id = vo['class_id']\n pred_geojson_local = download_if_needed(\n pred_geojson, tmp_dir)\n scene_evaluation = self.create_evaluation()\n scene_evaluation.compute_vector(\n gt_geojson, pred_geojson_local, mode, class_id)\n evaluation.merge(scene_evaluation)\n\n evaluation.save(self.output_uri)\n", "path": "rastervision/evaluation/semantic_segmentation_evaluator.py"}, {"content": "from abc import (abstractmethod)\nimport logging\n\nfrom rastervision.evaluation import Evaluator\nfrom rastervision.data import ActivateMixin\n\nlog = logging.getLogger(__name__)\n\n\nclass ClassificationEvaluator(Evaluator):\n \"\"\"Evaluates predictions for a set of scenes.\n \"\"\"\n\n def __init__(self, class_map, output_uri):\n self.class_map = class_map\n self.output_uri = output_uri\n\n @abstractmethod\n def create_evaluation(self):\n pass\n\n def process(self, scenes, tmp_dir):\n evaluation = self.create_evaluation()\n for scene in scenes:\n log.info('Computing evaluation for scene {}...'.format(scene.id))\n label_source = scene.ground_truth_label_source\n label_store = scene.prediction_label_store\n with ActivateMixin.compose(label_source, label_store):\n ground_truth = label_source.get_labels()\n predictions = label_store.get_labels()\n\n if scene.aoi_polygons:\n # Filter labels based on AOI.\n ground_truth = ground_truth.filter_by_aoi(\n scene.aoi_polygons)\n predictions = predictions.filter_by_aoi(scene.aoi_polygons)\n scene_evaluation = self.create_evaluation()\n scene_evaluation.compute(ground_truth, predictions)\n evaluation.merge(scene_evaluation)\n\n evaluation.save(self.output_uri)\n", "path": "rastervision/evaluation/classification_evaluator.py"}]} | 2,287 | 778 |
gh_patches_debug_23538 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-1884 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The first worker may crash in ALLREDUCE mode
When the worker is the only running worker pod, `_get_peer_set` will get an empty peer set.
Then `consensus_init_kwargs` will set "known_addr_list" to an empty list.
This will cause an error in ftlib.
```
[2020-03-30 06:16:07,202] [WARNING] [communicator.py:102:_get_peer_set] svc_name is edl-allreduce-haitao-ftlib-consensus
[2020-03-30 06:16:09,206] [WARNING] [communicator.py:108:_get_peer_set] peer_set is set()
Setting Bind Address as 11.233.87.89
log file: /tmp/memberlist.log
[2020-03-30 06:16:21,713] [WARNING] [communicator.py:37:__init__] Retry building consensus...
[2020-03-30 06:16:21,713] [WARNING] [communicator.py:102:_get_peer_set] svc_name is edl-allreduce-haitao-ftlib-consensus
[2020-03-30 06:16:21,714] [WARNING] [communicator.py:108:_get_peer_set] peer_set is set()
Traceback (most recent call last):
File "/usr/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/elasticdl/elasticdl/python/worker/main.py", line 76, in <module>
main()
File "/elasticdl/elasticdl/python/worker/main.py", line 70, in main
set_parallelism=True,
File "/elasticdl/elasticdl/python/worker/worker.py", line 122, in __init__
self._init_from_args(args)
File "/elasticdl/elasticdl/python/worker/worker.py", line 159, in _init_from_args
if self._distribution_strategy == DistributionStrategy.ALLREDUCE
File "/elasticdl/elasticdl/python/collective_ops/communicator.py", line 39, in __init__
known_addr_list=list(self._get_peer_set(service_name))
File "/usr/local/lib/python3.6/dist-packages/ftlib/impl.py", line 137, in manual_join
return self.consensus.manual_join(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/ftlib/consensus/gossip/impl.py", line 85, in manual_join
self.joined = self._join(known_addr_list, wait_time=wait_time)
File "/usr/local/lib/python3.6/dist-packages/ftlib/consensus/gossip/impl.py", line 92, in _join
assert addr_list_len >= 1
AssertionError
```
</issue>
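The failure condition is easy to reproduce in isolation. The sketch below mirrors `_get_peer_set` from the communicator (service name taken from the log above) and the assertion from the traceback; it is an illustration of the problem, not a fix, and only resolves meaningfully inside a cluster where that headless service exists.

```python
# Sketch of the failure path: with a single running worker pod, every address
# resolved for the service is this worker's own IP, so the peer set is empty.
import socket

svc_name = "edl-allreduce-haitao-ftlib-consensus"
my_ip = socket.gethostbyname(socket.gethostname())
addrs = socket.getaddrinfo(svc_name, 0, proto=socket.IPPROTO_TCP)
peer_set = {addr[-1][0] for addr in addrs if addr[-1][0] != my_ip}

print(peer_set)                    # set() -> known_addr_list == []
assert len(list(peer_set)) >= 1    # the check that fails inside ftlib's _join
```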
<code>
[start of elasticdl/python/collective_ops/communicator.py]
1 import socket
2
3 from elasticdl.python.common.constants import CollectiveCommunicatorStatus
4 from elasticdl.python.common.log_utils import default_logger as logger
5
6 try:
7 from ftlib import BasicFTLib
8 from ftlib.ftlib_status import FTAllReduceStatus
9
10 _FTLIB_INSTALLED = True
11 except ImportError:
12 BasicFTLib = object
13 FTAllReduceStatus = object
14 _FTLIB_INSTALLED = False
15
16
17 _SUPPORTED_ALLREDUCE_OPS = ["MEAN"]
18 _FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE = (
19 "FTLib is not installed. Default to succeeded for testing purposes"
20 )
21
22
23 class CollectiveCommunicator(object):
24 def __init__(self, service_name=None):
25 if _FTLIB_INSTALLED:
26 self._ftlib = BasicFTLib(
27 consensus="gossip",
28 commlib="pytorch",
29 consensus_init_kwargs={
30 "known_addr_list": list(self._get_peer_set(service_name)),
31 "custom_bind_addr": socket.gethostbyname(
32 socket.gethostname()
33 ),
34 },
35 )
36 while not self._ftlib.consensus_joined():
37 logger.warning("Retry building consensus...")
38 self._ftlib.manual_join(
39 known_addr_list=list(self._get_peer_set(service_name))
40 )
41 else:
42 logger.warning(
43 "FTLib is not installed. The CollectiveCommunicator "
44 "may not work as expected"
45 )
46 self._ftlib = None
47
48 def allreduce(self, data, op="MEAN"):
49 if data is None:
50 logger.error("Data is required for allreduce operation")
51 return CollectiveCommunicatorStatus.FAILED, data
52 if op not in _SUPPORTED_ALLREDUCE_OPS:
53 logger.error(
54 "%s is not in list of supported allreduce operations: %s"
55 % (op, _SUPPORTED_ALLREDUCE_OPS)
56 )
57 return CollectiveCommunicatorStatus.FAILED, data
58 if self._ftlib is not None:
59 res = self._ftlib.wait_gradients_ready(data)
60 if res == FTAllReduceStatus.SUCCESS:
61 return CollectiveCommunicatorStatus.SUCCEEDED, data
62 else:
63 return CollectiveCommunicatorStatus.FAILED, data
64 else:
65 logger.warning(_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE)
66 return CollectiveCommunicatorStatus.SUCCEEDED, data
67
68 def broadcast(self, data, src_rank):
69 if self._ftlib is not None:
70 res = self._ftlib.broadcast(data, src_rank)
71 if res == FTAllReduceStatus.SUCCESS:
72 return CollectiveCommunicatorStatus.SUCCEEDED, data
73 else:
74 return CollectiveCommunicatorStatus.FAILED, data
75 else:
76 logger.warning(_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE)
77 return CollectiveCommunicatorStatus.SUCCEEDED, data
78
79 def barrier(self):
80 if self._ftlib is not None:
81 res = self._ftlib.barrier()
82 if res == FTAllReduceStatus.SUCCESS:
83 return CollectiveCommunicatorStatus.SUCCEEDED
84 else:
85 return CollectiveCommunicatorStatus.FAILED
86 else:
87 logger.warning(_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE)
88 return CollectiveCommunicatorStatus.SUCCEEDED
89
90 def is_initialized(self):
91 """This will be `False` under three occasions:
92 * New workers report joining in
93 * Collective-communication operations fail or time out
94 * Liveness probe fails for existing workers
95 """
96 if self._ftlib is not None:
97 return self._ftlib.initialized
98 else:
99 return True
100
101 def _get_peer_set(self, svc_name):
102 if svc_name is None:
103 return None
104 my_ip = socket.gethostbyname(socket.gethostname())
105 temp_set = socket.getaddrinfo(svc_name, 0, proto=socket.IPPROTO_TCP)
106 peer_set = {peer[-1][0] for peer in temp_set if peer[-1][0] != my_ip}
107 return peer_set
108
[end of elasticdl/python/collective_ops/communicator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticdl/python/collective_ops/communicator.py b/elasticdl/python/collective_ops/communicator.py
--- a/elasticdl/python/collective_ops/communicator.py
+++ b/elasticdl/python/collective_ops/communicator.py
@@ -23,17 +23,18 @@
class CollectiveCommunicator(object):
def __init__(self, service_name=None):
if _FTLIB_INSTALLED:
+ peer_list = list(self._get_peer_set(service_name))
self._ftlib = BasicFTLib(
consensus="gossip",
commlib="pytorch",
consensus_init_kwargs={
- "known_addr_list": list(self._get_peer_set(service_name)),
+ "known_addr_list": peer_list,
"custom_bind_addr": socket.gethostbyname(
socket.gethostname()
),
},
)
- while not self._ftlib.consensus_joined():
+ while peer_list and not self._ftlib.consensus_joined():
logger.warning("Retry building consensus...")
self._ftlib.manual_join(
known_addr_list=list(self._get_peer_set(service_name))
| {"golden_diff": "diff --git a/elasticdl/python/collective_ops/communicator.py b/elasticdl/python/collective_ops/communicator.py\n--- a/elasticdl/python/collective_ops/communicator.py\n+++ b/elasticdl/python/collective_ops/communicator.py\n@@ -23,17 +23,18 @@\n class CollectiveCommunicator(object):\n def __init__(self, service_name=None):\n if _FTLIB_INSTALLED:\n+ peer_list = list(self._get_peer_set(service_name))\n self._ftlib = BasicFTLib(\n consensus=\"gossip\",\n commlib=\"pytorch\",\n consensus_init_kwargs={\n- \"known_addr_list\": list(self._get_peer_set(service_name)),\n+ \"known_addr_list\": peer_list,\n \"custom_bind_addr\": socket.gethostbyname(\n socket.gethostname()\n ),\n },\n )\n- while not self._ftlib.consensus_joined():\n+ while peer_list and not self._ftlib.consensus_joined():\n logger.warning(\"Retry building consensus...\")\n self._ftlib.manual_join(\n known_addr_list=list(self._get_peer_set(service_name))\n", "issue": "The first worker may crash in ALLREDUCE mode \nWhen the worker is the only running worker pod, `_get_peer_set` will get an empty peer set.\r\nThen consensus_init_kwars will set \"known_addr_list\" as empty.\r\nThis will cause an error in ftlib.\r\n\r\n\r\n```\r\n[2020-03-30 06:16:07,202] [WARNING] [communicator.py:102:_get_peer_set] svc_name is edl-allreduce-haitao-ftlib-consensus\r\n[2020-03-30 06:16:09,206] [WARNING] [communicator.py:108:_get_peer_set] peer_set is set()\r\nSetting Bind Address as 11.233.87.89\r\nlog file: /tmp/memberlist.log\r\n[2020-03-30 06:16:21,713] [WARNING] [communicator.py:37:__init__] Retry building consensus...\r\n[2020-03-30 06:16:21,713] [WARNING] [communicator.py:102:_get_peer_set] svc_name is edl-allreduce-haitao-ftlib-consensus\r\n[2020-03-30 06:16:21,714] [WARNING] [communicator.py:108:_get_peer_set] peer_set is set()\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"/usr/lib/python3.6/runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"/elasticdl/elasticdl/python/worker/main.py\", line 76, in <module>\r\n main()\r\n File \"/elasticdl/elasticdl/python/worker/main.py\", line 70, in main\r\n set_parallelism=True,\r\n File \"/elasticdl/elasticdl/python/worker/worker.py\", line 122, in __init__\r\n self._init_from_args(args)\r\n File \"/elasticdl/elasticdl/python/worker/worker.py\", line 159, in _init_from_args\r\n if self._distribution_strategy == DistributionStrategy.ALLREDUCE\r\n File \"/elasticdl/elasticdl/python/collective_ops/communicator.py\", line 39, in __init__\r\n known_addr_list=list(self._get_peer_set(service_name))\r\n File \"/usr/local/lib/python3.6/dist-packages/ftlib/impl.py\", line 137, in manual_join\r\n return self.consensus.manual_join(*args, **kwargs)\r\n File \"/usr/local/lib/python3.6/dist-packages/ftlib/consensus/gossip/impl.py\", line 85, in manual_join\r\n self.joined = self._join(known_addr_list, wait_time=wait_time)\r\n File \"/usr/local/lib/python3.6/dist-packages/ftlib/consensus/gossip/impl.py\", line 92, in _join\r\n assert addr_list_len >= 1\r\nAssertionError\r\n```\n", "before_files": [{"content": "import socket\n\nfrom elasticdl.python.common.constants import CollectiveCommunicatorStatus\nfrom elasticdl.python.common.log_utils import default_logger as logger\n\ntry:\n from ftlib import BasicFTLib\n from ftlib.ftlib_status import FTAllReduceStatus\n\n _FTLIB_INSTALLED = True\nexcept ImportError:\n BasicFTLib = object\n FTAllReduceStatus = object\n _FTLIB_INSTALLED = 
False\n\n\n_SUPPORTED_ALLREDUCE_OPS = [\"MEAN\"]\n_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE = (\n \"FTLib is not installed. Default to succeeded for testing purposes\"\n)\n\n\nclass CollectiveCommunicator(object):\n def __init__(self, service_name=None):\n if _FTLIB_INSTALLED:\n self._ftlib = BasicFTLib(\n consensus=\"gossip\",\n commlib=\"pytorch\",\n consensus_init_kwargs={\n \"known_addr_list\": list(self._get_peer_set(service_name)),\n \"custom_bind_addr\": socket.gethostbyname(\n socket.gethostname()\n ),\n },\n )\n while not self._ftlib.consensus_joined():\n logger.warning(\"Retry building consensus...\")\n self._ftlib.manual_join(\n known_addr_list=list(self._get_peer_set(service_name))\n )\n else:\n logger.warning(\n \"FTLib is not installed. The CollectiveCommunicator \"\n \"may not work as expected\"\n )\n self._ftlib = None\n\n def allreduce(self, data, op=\"MEAN\"):\n if data is None:\n logger.error(\"Data is required for allreduce operation\")\n return CollectiveCommunicatorStatus.FAILED, data\n if op not in _SUPPORTED_ALLREDUCE_OPS:\n logger.error(\n \"%s is not in list of supported allreduce operations: %s\"\n % (op, _SUPPORTED_ALLREDUCE_OPS)\n )\n return CollectiveCommunicatorStatus.FAILED, data\n if self._ftlib is not None:\n res = self._ftlib.wait_gradients_ready(data)\n if res == FTAllReduceStatus.SUCCESS:\n return CollectiveCommunicatorStatus.SUCCEEDED, data\n else:\n return CollectiveCommunicatorStatus.FAILED, data\n else:\n logger.warning(_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE)\n return CollectiveCommunicatorStatus.SUCCEEDED, data\n\n def broadcast(self, data, src_rank):\n if self._ftlib is not None:\n res = self._ftlib.broadcast(data, src_rank)\n if res == FTAllReduceStatus.SUCCESS:\n return CollectiveCommunicatorStatus.SUCCEEDED, data\n else:\n return CollectiveCommunicatorStatus.FAILED, data\n else:\n logger.warning(_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE)\n return CollectiveCommunicatorStatus.SUCCEEDED, data\n\n def barrier(self):\n if self._ftlib is not None:\n res = self._ftlib.barrier()\n if res == FTAllReduceStatus.SUCCESS:\n return CollectiveCommunicatorStatus.SUCCEEDED\n else:\n return CollectiveCommunicatorStatus.FAILED\n else:\n logger.warning(_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE)\n return CollectiveCommunicatorStatus.SUCCEEDED\n\n def is_initialized(self):\n \"\"\"This will be `False` under three occasions:\n * New workers report joining in\n * Collective-communication operations fail or time out\n * Liveness probe fails for existing workers\n \"\"\"\n if self._ftlib is not None:\n return self._ftlib.initialized\n else:\n return True\n\n def _get_peer_set(self, svc_name):\n if svc_name is None:\n return None\n my_ip = socket.gethostbyname(socket.gethostname())\n temp_set = socket.getaddrinfo(svc_name, 0, proto=socket.IPPROTO_TCP)\n peer_set = {peer[-1][0] for peer in temp_set if peer[-1][0] != my_ip}\n return peer_set\n", "path": "elasticdl/python/collective_ops/communicator.py"}]} | 2,323 | 248 |
gh_patches_debug_53980 | rasdani/github-patches | git_diff | scikit-hep__pyhf-2135 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Matplotlib broken in Pyodide demo in docs
In the docs https://pyhf.readthedocs.io/en/v0.7.0/, the Pyodide example is broken for me:
```pytb
---------------------------------------------------------------------------
ModuleNotFoundError Traceback (most recent call last)
Cell In[1], line 3
1 import piplite
2 await piplite.install(["pyhf==0.7.0"])
----> 3 get_ipython().run_line_magic('matplotlib', 'inline')
4 import pyhf
File /lib/python3.10/site-packages/IPython/core/interactiveshell.py:2369, in InteractiveShell.run_line_magic(self, magic_name, line, _stack_depth)
2367 kwargs['local_ns'] = self.get_local_scope(stack_depth)
2368 with self.builtin_trap:
-> 2369 result = fn(*args, **kwargs)
2371 # The code below prevents the output from being displayed
2372 # when using magics with decodator @output_can_be_silenced
2373 # when the last Python token in the expression is a ';'.
2374 if getattr(fn, magic.MAGIC_OUTPUT_CAN_BE_SILENCED, False):
File /lib/python3.10/site-packages/IPython/core/magics/pylab.py:99, in PylabMagics.matplotlib(self, line)
97 print("Available matplotlib backends: %s" % backends_list)
98 else:
---> 99 gui, backend = self.shell.enable_matplotlib(args.gui.lower() if isinstance(args.gui, str) else args.gui)
100 self._show_matplotlib_backend(args.gui, backend)
File /lib/python3.10/site-packages/IPython/core/interactiveshell.py:3540, in InteractiveShell.enable_matplotlib(self, gui)
3519 def enable_matplotlib(self, gui=None):
3520 """Enable interactive matplotlib and inline figure support.
3521
3522 This takes the following steps:
(...)
3538 display figures inline.
3539 """
-> 3540 from matplotlib_inline.backend_inline import configure_inline_support
3542 from IPython.core import pylabtools as pt
3543 gui, backend = pt.find_gui_and_backend(gui, self.pylab_gui_select)
File /lib/python3.10/site-packages/matplotlib_inline/__init__.py:1
----> 1 from . import backend_inline, config # noqa
2 __version__ = "0.1.6" # noqa
File /lib/python3.10/site-packages/matplotlib_inline/backend_inline.py:6
1 """A matplotlib backend for publishing figures via display_data"""
3 # Copyright (c) IPython Development Team.
4 # Distributed under the terms of the BSD 3-Clause License.
----> 6 import matplotlib
7 from matplotlib import colors
8 from matplotlib.backends import backend_agg
ModuleNotFoundError: The module 'matplotlib' is included in the Pyodide distribution, but it is not installed.
You can install it by calling:
await micropip.install("matplotlib") in Python, or
await pyodide.loadPackage("matplotlib") in JavaScript
See https://pyodide.org/en/stable/usage/loading-packages.html for more details.
```
It used to work previously, though I can not say for sure when it last worked for me. Running on MacOS (ARM), tried Firefox and Chrome (resulting in the above), while Safari seems stuck executing the import commands provided.
</issue>
<code>
[start of docs/generate_jupyterlite_iframe.py]
1 import urllib.parse
2
3
4 def main():
5 code = """\
6 import piplite
7 await piplite.install(["pyhf==0.7.0"])
8 %matplotlib inline
9 import pyhf\
10 """
11
12 parsed_url = urllib.parse.quote(code)
13 url_base = "https://jupyterlite.github.io/demo/repl/index.html"
14 jupyterlite_options = "?kernel=python&toolbar=1&code="
15 jupyterlite_url = url_base + jupyterlite_options + parsed_url
16
17 print(f"# jupyterlite URL:\n{jupyterlite_url}")
18
19 jupyterlite_iframe_rst = f"""\
20 <iframe
21 src="{jupyterlite_url}"
22 width="100%"
23 height="500px"
24 ></iframe>\
25 """
26 print(f"\n# RST for iframe for jupyterlite.rst:\n{jupyterlite_iframe_rst}")
27
28
29 if __name__ == "__main__":
30 raise SystemExit(main())
31
[end of docs/generate_jupyterlite_iframe.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/generate_jupyterlite_iframe.py b/docs/generate_jupyterlite_iframe.py
--- a/docs/generate_jupyterlite_iframe.py
+++ b/docs/generate_jupyterlite_iframe.py
@@ -4,7 +4,7 @@
def main():
code = """\
import piplite
-await piplite.install(["pyhf==0.7.0"])
+await piplite.install(["pyhf==0.7.0", "matplotlib>=3.0.0"])
%matplotlib inline
import pyhf\
"""
| {"golden_diff": "diff --git a/docs/generate_jupyterlite_iframe.py b/docs/generate_jupyterlite_iframe.py\n--- a/docs/generate_jupyterlite_iframe.py\n+++ b/docs/generate_jupyterlite_iframe.py\n@@ -4,7 +4,7 @@\n def main():\n code = \"\"\"\\\n import piplite\n-await piplite.install([\"pyhf==0.7.0\"])\n+await piplite.install([\"pyhf==0.7.0\", \"matplotlib>=3.0.0\"])\n %matplotlib inline\n import pyhf\\\n \"\"\"\n", "issue": "Matplotlib broken in Pyodide demo in docs\nIn the docs https://pyhf.readthedocs.io/en/v0.7.0/, the Pyodide example is broken for me:\r\n```pytb\r\n---------------------------------------------------------------------------\r\nModuleNotFoundError Traceback (most recent call last)\r\nCell In[1], line 3\r\n 1 import piplite\r\n 2 await piplite.install([\"pyhf==0.7.0\"])\r\n----> 3 get_ipython().run_line_magic('matplotlib', 'inline')\r\n 4 import pyhf\r\n\r\nFile /lib/python3.10/site-packages/IPython/core/interactiveshell.py:2369, in InteractiveShell.run_line_magic(self, magic_name, line, _stack_depth)\r\n 2367 kwargs['local_ns'] = self.get_local_scope(stack_depth)\r\n 2368 with self.builtin_trap:\r\n-> 2369 result = fn(*args, **kwargs)\r\n 2371 # The code below prevents the output from being displayed\r\n 2372 # when using magics with decodator @output_can_be_silenced\r\n 2373 # when the last Python token in the expression is a ';'.\r\n 2374 if getattr(fn, magic.MAGIC_OUTPUT_CAN_BE_SILENCED, False):\r\n\r\nFile /lib/python3.10/site-packages/IPython/core/magics/pylab.py:99, in PylabMagics.matplotlib(self, line)\r\n 97 print(\"Available matplotlib backends: %s\" % backends_list)\r\n 98 else:\r\n---> 99 gui, backend = self.shell.enable_matplotlib(args.gui.lower() if isinstance(args.gui, str) else args.gui)\r\n 100 self._show_matplotlib_backend(args.gui, backend)\r\n\r\nFile /lib/python3.10/site-packages/IPython/core/interactiveshell.py:3540, in InteractiveShell.enable_matplotlib(self, gui)\r\n 3519 def enable_matplotlib(self, gui=None):\r\n 3520 \"\"\"Enable interactive matplotlib and inline figure support.\r\n 3521 \r\n 3522 This takes the following steps:\r\n (...)\r\n 3538 display figures inline.\r\n 3539 \"\"\"\r\n-> 3540 from matplotlib_inline.backend_inline import configure_inline_support\r\n 3542 from IPython.core import pylabtools as pt\r\n 3543 gui, backend = pt.find_gui_and_backend(gui, self.pylab_gui_select)\r\n\r\nFile /lib/python3.10/site-packages/matplotlib_inline/__init__.py:1\r\n----> 1 from . import backend_inline, config # noqa\r\n 2 __version__ = \"0.1.6\" # noqa\r\n\r\nFile /lib/python3.10/site-packages/matplotlib_inline/backend_inline.py:6\r\n 1 \"\"\"A matplotlib backend for publishing figures via display_data\"\"\"\r\n 3 # Copyright (c) IPython Development Team.\r\n 4 # Distributed under the terms of the BSD 3-Clause License.\r\n----> 6 import matplotlib\r\n 7 from matplotlib import colors\r\n 8 from matplotlib.backends import backend_agg\r\n\r\nModuleNotFoundError: The module 'matplotlib' is included in the Pyodide distribution, but it is not installed.\r\nYou can install it by calling:\r\n await micropip.install(\"matplotlib\") in Python, or\r\n await pyodide.loadPackage(\"matplotlib\") in JavaScript\r\nSee https://pyodide.org/en/stable/usage/loading-packages.html for more details.\r\n```\r\nIt used to work previously, though I can not say for sure when it last worked for me. 
Running on MacOS (ARM), tried Firefox and Chrome (resulting in the above), while Safari seems stuck executing the import commands provided.\n", "before_files": [{"content": "import urllib.parse\n\n\ndef main():\n code = \"\"\"\\\nimport piplite\nawait piplite.install([\"pyhf==0.7.0\"])\n%matplotlib inline\nimport pyhf\\\n\"\"\"\n\n parsed_url = urllib.parse.quote(code)\n url_base = \"https://jupyterlite.github.io/demo/repl/index.html\"\n jupyterlite_options = \"?kernel=python&toolbar=1&code=\"\n jupyterlite_url = url_base + jupyterlite_options + parsed_url\n\n print(f\"# jupyterlite URL:\\n{jupyterlite_url}\")\n\n jupyterlite_iframe_rst = f\"\"\"\\\n <iframe\n src=\"{jupyterlite_url}\"\n width=\"100%\"\n height=\"500px\"\n ></iframe>\\\n\"\"\"\n print(f\"\\n# RST for iframe for jupyterlite.rst:\\n{jupyterlite_iframe_rst}\")\n\n\nif __name__ == \"__main__\":\n raise SystemExit(main())\n", "path": "docs/generate_jupyterlite_iframe.py"}]} | 1,632 | 120 |