problem_id<br>stringlengths 18-22 | source<br>stringclasses 1 value | task_type<br>stringclasses 1 value | in_source_id<br>stringlengths 13-58 | prompt<br>stringlengths 1.1k-25.4k | golden_diff<br>stringlengths 145-5.13k | verification_info<br>stringlengths 582-39.1k | num_tokens<br>int64 271-4.1k | num_tokens_diff<br>int64 47-1.02k |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_9074 | rasdani/github-patches | git_diff | googleapis__google-auth-library-python-630 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tests broken on Python < 3.8
```python
_______ ERROR collecting tests_async/transport/test_aiohttp_requests.py ________
ImportError while importing test module '/home/tseaver/projects/agendaless/Google/src/google-auth/tests_async/transport/test_aiohttp_requests.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
.nox/unit-3-6/lib/python3.6/site-packages/aioresponses/compat.py:14: in <module>
from unittest import IsolatedAsyncioTestCase, skipIf
E ImportError: cannot import name 'IsolatedAsyncioTestCase'
During handling of the above exception, another exception occurred:
/opt/Python-3.6.10/lib/python3.6/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests_async/transport/test_aiohttp_requests.py:16: in <module>
from aioresponses import aioresponses, core
.nox/unit-3-6/lib/python3.6/site-packages/aioresponses/__init__.py:2: in <module>
from .core import CallbackResult, aioresponses
.nox/unit-3-6/lib/python3.6/site-packages/aioresponses/core.py:23: in <module>
from .compat import (
.nox/unit-3-6/lib/python3.6/site-packages/aioresponses/compat.py:29: in <module>
from asynctest import fail_on, skipIf
E ModuleNotFoundError: No module named 'asynctest'
```
Due to https://github.com/pnuckowski/aioresponses/issues/172
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `noxfile.py`
Content:
```
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import nox
16
17 TEST_DEPENDENCIES = [
18 "flask",
19 "freezegun",
20 "mock",
21 "oauth2client",
22 "pyopenssl",
23 "pytest",
24 "pytest-cov",
25 "pytest-localserver",
26 "requests",
27 "urllib3",
28 "cryptography",
29 "responses",
30 "grpcio",
31 ]
32
33 ASYNC_DEPENDENCIES = ["pytest-asyncio", "aioresponses"]
34
35 BLACK_VERSION = "black==19.3b0"
36 BLACK_PATHS = [
37 "google",
38 "tests",
39 "tests_async",
40 "noxfile.py",
41 "setup.py",
42 "docs/conf.py",
43 ]
44
45
46 @nox.session(python="3.7")
47 def lint(session):
48 session.install("flake8", "flake8-import-order", "docutils", BLACK_VERSION)
49 session.install(".")
50 session.run("black", "--check", *BLACK_PATHS)
51 session.run(
52 "flake8",
53 "--import-order-style=google",
54 "--application-import-names=google,tests,system_tests",
55 "google",
56 "tests",
57 "tests_async",
58 )
59 session.run(
60 "python", "setup.py", "check", "--metadata", "--restructuredtext", "--strict"
61 )
62
63
64 @nox.session(python="3.6")
65 def blacken(session):
66 """Run black.
67
68 Format code to uniform standard.
69
70 This currently uses Python 3.6 due to the automated Kokoro run of synthtool.
71 That run uses an image that doesn't have 3.6 installed. Before updating this
72 check the state of the `gcp_ubuntu_config` we use for that Kokoro run.
73 """
74 session.install(BLACK_VERSION)
75 session.run("black", *BLACK_PATHS)
76
77
78 @nox.session(python=["3.6", "3.7", "3.8"])
79 def unit(session):
80 session.install(*TEST_DEPENDENCIES)
81 session.install(*(ASYNC_DEPENDENCIES))
82 session.install(".")
83 session.run(
84 "pytest",
85 "--cov=google.auth",
86 "--cov=google.oauth2",
87 "--cov=tests",
88 "tests",
89 "tests_async",
90 )
91
92
93 @nox.session(python=["2.7", "3.5"])
94 def unit_prev_versions(session):
95 session.install(*TEST_DEPENDENCIES)
96 session.install(".")
97 session.run(
98 "pytest", "--cov=google.auth", "--cov=google.oauth2", "--cov=tests", "tests"
99 )
100
101
102 @nox.session(python="3.7")
103 def cover(session):
104 session.install(*TEST_DEPENDENCIES)
105 session.install(*(ASYNC_DEPENDENCIES))
106 session.install(".")
107 session.run(
108 "pytest",
109 "--cov=google.auth",
110 "--cov=google.oauth2",
111 "--cov=tests",
112 "--cov=tests_async",
113 "--cov-report=",
114 "tests",
115 "tests_async",
116 )
117 session.run("coverage", "report", "--show-missing", "--fail-under=100")
118
119
120 @nox.session(python="3.7")
121 def docgen(session):
122 session.env["SPHINX_APIDOC_OPTIONS"] = "members,inherited-members,show-inheritance"
123 session.install(*TEST_DEPENDENCIES)
124 session.install("sphinx")
125 session.install(".")
126 session.run("rm", "-r", "docs/reference")
127 session.run(
128 "sphinx-apidoc",
129 "--output-dir",
130 "docs/reference",
131 "--separate",
132 "--module-first",
133 "google",
134 )
135
136
137 @nox.session(python="3.7")
138 def docs(session):
139 session.install("sphinx", "-r", "docs/requirements-docs.txt")
140 session.install(".")
141 session.run("make", "-C", "docs", "html")
142
143
144 @nox.session(python="pypy")
145 def pypy(session):
146 session.install(*TEST_DEPENDENCIES)
147 session.install(".")
148 session.run(
149 "pytest",
150 "--cov=google.auth",
151 "--cov=google.oauth2",
152 "--cov=tests",
153 "tests",
154 "tests_async",
155 )
156
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -30,7 +30,7 @@
"grpcio",
]
-ASYNC_DEPENDENCIES = ["pytest-asyncio", "aioresponses"]
+ASYNC_DEPENDENCIES = ["pytest-asyncio", "aioresponses", "asynctest"]
BLACK_VERSION = "black==19.3b0"
BLACK_PATHS = [
@@ -144,6 +144,7 @@
@nox.session(python="pypy")
def pypy(session):
session.install(*TEST_DEPENDENCIES)
+ session.install(*ASYNC_DEPENDENCIES)
session.install(".")
session.run(
"pytest",
| {"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -30,7 +30,7 @@\n \"grpcio\",\n ]\n \n-ASYNC_DEPENDENCIES = [\"pytest-asyncio\", \"aioresponses\"]\n+ASYNC_DEPENDENCIES = [\"pytest-asyncio\", \"aioresponses\", \"asynctest\"]\n \n BLACK_VERSION = \"black==19.3b0\"\n BLACK_PATHS = [\n@@ -144,6 +144,7 @@\n @nox.session(python=\"pypy\")\n def pypy(session):\n session.install(*TEST_DEPENDENCIES)\n+ session.install(*ASYNC_DEPENDENCIES)\n session.install(\".\")\n session.run(\n \"pytest\",\n", "issue": "Tests broken on Python < 3.8\n```python\r\n_______ ERROR collecting tests_async/transport/test_aiohttp_requests.py ________\r\nImportError while importing test module '/home/tseaver/projects/agendaless/Google/src/google-auth/tests_async/transport/test_aiohttp_requests.py'.\r\nHint: make sure your test modules/packages have valid Python names.\r\nTraceback:\r\n.nox/unit-3-6/lib/python3.6/site-packages/aioresponses/compat.py:14: in <module>\r\n from unittest import IsolatedAsyncioTestCase, skipIf\r\nE ImportError: cannot import name 'IsolatedAsyncioTestCase'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n/opt/Python-3.6.10/lib/python3.6/importlib/__init__.py:126: in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\ntests_async/transport/test_aiohttp_requests.py:16: in <module>\r\n from aioresponses import aioresponses, core\r\n.nox/unit-3-6/lib/python3.6/site-packages/aioresponses/__init__.py:2: in <module>\r\n from .core import CallbackResult, aioresponses\r\n.nox/unit-3-6/lib/python3.6/site-packages/aioresponses/core.py:23: in <module>\r\n from .compat import (\r\n.nox/unit-3-6/lib/python3.6/site-packages/aioresponses/compat.py:29: in <module>\r\n from asynctest import fail_on, skipIf\r\nE ModuleNotFoundError: No module named 'asynctest'\r\n```\r\n\r\nDue to https://github.com/pnuckowski/aioresponses/issues/172\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport nox\n\nTEST_DEPENDENCIES = [\n \"flask\",\n \"freezegun\",\n \"mock\",\n \"oauth2client\",\n \"pyopenssl\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-localserver\",\n \"requests\",\n \"urllib3\",\n \"cryptography\",\n \"responses\",\n \"grpcio\",\n]\n\nASYNC_DEPENDENCIES = [\"pytest-asyncio\", \"aioresponses\"]\n\nBLACK_VERSION = \"black==19.3b0\"\nBLACK_PATHS = [\n \"google\",\n \"tests\",\n \"tests_async\",\n \"noxfile.py\",\n \"setup.py\",\n \"docs/conf.py\",\n]\n\n\[email protected](python=\"3.7\")\ndef lint(session):\n session.install(\"flake8\", \"flake8-import-order\", \"docutils\", BLACK_VERSION)\n session.install(\".\")\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n session.run(\n \"flake8\",\n \"--import-order-style=google\",\n \"--application-import-names=google,tests,system_tests\",\n \"google\",\n \"tests\",\n \"tests_async\",\n )\n session.run(\n \"python\", \"setup.py\", \"check\", \"--metadata\", \"--restructuredtext\", \"--strict\"\n 
)\n\n\[email protected](python=\"3.6\")\ndef blacken(session):\n \"\"\"Run black.\n\n Format code to uniform standard.\n\n This currently uses Python 3.6 due to the automated Kokoro run of synthtool.\n That run uses an image that doesn't have 3.6 installed. Before updating this\n check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\n \"\"\"\n session.install(BLACK_VERSION)\n session.run(\"black\", *BLACK_PATHS)\n\n\[email protected](python=[\"3.6\", \"3.7\", \"3.8\"])\ndef unit(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(*(ASYNC_DEPENDENCIES))\n session.install(\".\")\n session.run(\n \"pytest\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"tests\",\n \"tests_async\",\n )\n\n\[email protected](python=[\"2.7\", \"3.5\"])\ndef unit_prev_versions(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(\".\")\n session.run(\n \"pytest\", \"--cov=google.auth\", \"--cov=google.oauth2\", \"--cov=tests\", \"tests\"\n )\n\n\[email protected](python=\"3.7\")\ndef cover(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(*(ASYNC_DEPENDENCIES))\n session.install(\".\")\n session.run(\n \"pytest\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"--cov=tests_async\",\n \"--cov-report=\",\n \"tests\",\n \"tests_async\",\n )\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n\n\[email protected](python=\"3.7\")\ndef docgen(session):\n session.env[\"SPHINX_APIDOC_OPTIONS\"] = \"members,inherited-members,show-inheritance\"\n session.install(*TEST_DEPENDENCIES)\n session.install(\"sphinx\")\n session.install(\".\")\n session.run(\"rm\", \"-r\", \"docs/reference\")\n session.run(\n \"sphinx-apidoc\",\n \"--output-dir\",\n \"docs/reference\",\n \"--separate\",\n \"--module-first\",\n \"google\",\n )\n\n\[email protected](python=\"3.7\")\ndef docs(session):\n session.install(\"sphinx\", \"-r\", \"docs/requirements-docs.txt\")\n session.install(\".\")\n session.run(\"make\", \"-C\", \"docs\", \"html\")\n\n\[email protected](python=\"pypy\")\ndef pypy(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(\".\")\n session.run(\n \"pytest\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"tests\",\n \"tests_async\",\n )\n", "path": "noxfile.py"}], "after_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport nox\n\nTEST_DEPENDENCIES = [\n \"flask\",\n \"freezegun\",\n \"mock\",\n \"oauth2client\",\n \"pyopenssl\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-localserver\",\n \"requests\",\n \"urllib3\",\n \"cryptography\",\n \"responses\",\n \"grpcio\",\n]\n\nASYNC_DEPENDENCIES = [\"pytest-asyncio\", \"aioresponses\", \"asynctest\"]\n\nBLACK_VERSION = \"black==19.3b0\"\nBLACK_PATHS = [\n \"google\",\n \"tests\",\n \"tests_async\",\n \"noxfile.py\",\n \"setup.py\",\n \"docs/conf.py\",\n]\n\n\[email protected](python=\"3.7\")\ndef lint(session):\n session.install(\"flake8\", 
\"flake8-import-order\", \"docutils\", BLACK_VERSION)\n session.install(\".\")\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n session.run(\n \"flake8\",\n \"--import-order-style=google\",\n \"--application-import-names=google,tests,system_tests\",\n \"google\",\n \"tests\",\n \"tests_async\",\n )\n session.run(\n \"python\", \"setup.py\", \"check\", \"--metadata\", \"--restructuredtext\", \"--strict\"\n )\n\n\[email protected](python=\"3.6\")\ndef blacken(session):\n \"\"\"Run black.\n\n Format code to uniform standard.\n\n This currently uses Python 3.6 due to the automated Kokoro run of synthtool.\n That run uses an image that doesn't have 3.6 installed. Before updating this\n check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\n \"\"\"\n session.install(BLACK_VERSION)\n session.run(\"black\", *BLACK_PATHS)\n\n\[email protected](python=[\"3.6\", \"3.7\", \"3.8\"])\ndef unit(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(*(ASYNC_DEPENDENCIES))\n session.install(\".\")\n session.run(\n \"pytest\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"tests\",\n \"tests_async\",\n )\n\n\[email protected](python=[\"2.7\", \"3.5\"])\ndef unit_prev_versions(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(\".\")\n session.run(\n \"pytest\", \"--cov=google.auth\", \"--cov=google.oauth2\", \"--cov=tests\", \"tests\"\n )\n\n\[email protected](python=\"3.7\")\ndef cover(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(*(ASYNC_DEPENDENCIES))\n session.install(\".\")\n session.run(\n \"pytest\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"--cov=tests_async\",\n \"--cov-report=\",\n \"tests\",\n \"tests_async\",\n )\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n\n\[email protected](python=\"3.7\")\ndef docgen(session):\n session.env[\"SPHINX_APIDOC_OPTIONS\"] = \"members,inherited-members,show-inheritance\"\n session.install(*TEST_DEPENDENCIES)\n session.install(\"sphinx\")\n session.install(\".\")\n session.run(\"rm\", \"-r\", \"docs/reference\")\n session.run(\n \"sphinx-apidoc\",\n \"--output-dir\",\n \"docs/reference\",\n \"--separate\",\n \"--module-first\",\n \"google\",\n )\n\n\[email protected](python=\"3.7\")\ndef docs(session):\n session.install(\"sphinx\", \"-r\", \"docs/requirements-docs.txt\")\n session.install(\".\")\n session.run(\"make\", \"-C\", \"docs\", \"html\")\n\n\[email protected](python=\"pypy\")\ndef pypy(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(*ASYNC_DEPENDENCIES)\n session.install(\".\")\n session.run(\n \"pytest\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"tests\",\n \"tests_async\",\n )\n", "path": "noxfile.py"}]} | 2,001 | 164 |
gh_patches_debug_38536 | rasdani/github-patches | git_diff | chainer__chainer-6175 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`L.Deconvolution2D` lacks `dilate` option
although `F.deconvolution_2d` already supports it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/links/connection/deconvolution_2d.py`
Content:
```
1 import numpy
2
3 from chainer.backends import cuda
4 from chainer.functions.connection import deconvolution_2d
5 from chainer import initializers
6 from chainer import link
7 from chainer.utils import argument
8 from chainer import variable
9
10
11 class Deconvolution2D(link.Link):
12
13 """__init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0, nobias=False, outsize=None, initialW=None, initial_bias=None, *, groups=1)
14
15 Two dimensional deconvolution function.
16
17 This link wraps the :func:`~chainer.functions.deconvolution_2d` function
18 and holds the filter weight and bias vector as parameters.
19
20 Deconvolution links can use a feature of cuDNN called autotuning, which
21 selects the most efficient CNN algorithm for images of fixed-size,
22 can provide a significant performance boost for fixed neural nets.
23 To enable, set `chainer.using_config('autotune', True)`
24
25 Args:
26 in_channels (int or None): Number of channels of input arrays.
27 If ``None``, parameter initialization will be deferred until the
28 first forward data pass at which time the size will be determined.
29 out_channels (int): Number of channels of output arrays.
30 ksize (int or pair of ints): Size of filters (a.k.a. kernels).
31 ``ksize=k`` and ``ksize=(k, k)`` are equivalent.
32 stride (int or pair of ints): Stride of filter applications.
33 ``stride=s`` and ``stride=(s, s)`` are equivalent.
34 pad (int or pair of ints): Spatial padding width for input arrays.
35 ``pad=p`` and ``pad=(p, p)`` are equivalent.
36 nobias (bool): If ``True``, then this function does not use the bias
37 term.
38 outsize (tuple): Expected output size of deconvolutional operation.
39 It should be pair of height and width :math:`(out_H, out_W)`.
40 Default value is ``None`` and the outsize is estimated by
41 input size, stride and pad.
42 initialW (:ref:`initializer <initializer>`): Initializer to
43 initialize the weight. When it is :class:`numpy.ndarray`,
44 its ``ndim`` should be 4.
45 initial_bias (:ref:`initializer <initializer>`): Initializer to
46 initialize the bias. If ``None``, the bias will be initialized to
47 zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.
48 groups (int): The number of groups to use grouped deconvolution. The
49 default is one, where grouped deconvolution is not used.
50
51 The filter weight has four dimensions :math:`(c_I, c_O, k_H, k_W)`
52 which indicate the number of input channels, output channels,
53 height and width of the kernels, respectively.
54 The filter weight is initialized with i.i.d. Gaussian random samples, each
55 of which has zero mean and deviation :math:`\\sqrt{1/(c_I k_H k_W)}` by
56 default.
57
58 The bias vector is of size :math:`c_O`.
59 Its elements are initialized by ``bias`` argument.
60 If ``nobias`` argument is set to True, then this function does not hold
61 the bias parameter.
62
63 The output of this function can be non-deterministic when it uses cuDNN.
64 If ``chainer.configuration.config.cudnn_deterministic`` is ``True`` and
65 cuDNN version is >= v3, it forces cuDNN to use a deterministic algorithm.
66
67 .. seealso::
68 See :func:`chainer.functions.deconvolution_2d` for the definition of
69 two-dimensional convolution.
70
71 .. seealso::
72 See :func:`chainer.links.Convolution2D` for the examples of ways to
73 give arguments to this link.
74
75 .. admonition:: Example
76
77 There are several ways to make a Deconvolution2D link.
78
79 Let an input vector ``x`` be:
80
81 >>> x = np.arange(1 * 3 * 10 * 10, dtype=np.float32).reshape(1, 3, 10, 10)
82
83 1. Give the first three arguments explicitly:
84
85 In this case, all the other arguments are set to the default
86 values.
87
88 >>> l = L.Deconvolution2D(3, 7, 4)
89 >>> y = l(x)
90 >>> y.shape
91 (1, 7, 13, 13)
92
93 2. Omit ``in_channels`` or fill it with ``None``:
94
95 The below two cases are the same.
96
97 >>> l = L.Deconvolution2D(7, 4)
98 >>> y = l(x)
99 >>> y.shape
100 (1, 7, 13, 13)
101
102 >>> l = L.Deconvolution2D(None, 7, 4)
103 >>> y = l(x)
104 >>> y.shape
105 (1, 7, 13, 13)
106
107 When you omit the first argument, you need to specify the other
108 subsequent arguments from ``stride`` as keyword arguments. So the
109 below two cases are the same.
110
111 >>> l = L.Deconvolution2D(None, 7, 4, 2, 1)
112 >>> y = l(x)
113 >>> y.shape
114 (1, 7, 20, 20)
115
116 >>> l = L.Deconvolution2D(7, 4, stride=2, pad=1)
117 >>> y = l(x)
118 >>> y.shape
119 (1, 7, 20, 20)
120
121 """ # NOQA
122
123 def __init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0,
124 nobias=False, outsize=None, initialW=None, initial_bias=None,
125 **kwargs):
126 super(Deconvolution2D, self).__init__()
127
128 groups, = argument.parse_kwargs(
129 kwargs, ('groups', 1),
130 deterministic="deterministic argument is not supported anymore. "
131 "Use chainer.using_config('cudnn_deterministic', value) "
132 "context where value is either `True` or `False`.")
133
134 if ksize is None:
135 out_channels, ksize, in_channels = in_channels, out_channels, None
136
137 self.ksize = ksize
138 self.stride = _pair(stride)
139 self.pad = _pair(pad)
140 self.outsize = (None, None) if outsize is None else outsize
141 self.out_channels = out_channels
142 self.groups = int(groups)
143
144 with self.init_scope():
145 W_initializer = initializers._get_initializer(initialW)
146 self.W = variable.Parameter(W_initializer)
147 if in_channels is not None:
148 self._initialize_params(in_channels)
149
150 if nobias:
151 self.b = None
152 else:
153 if isinstance(initial_bias, (numpy.ndarray, cuda.ndarray)):
154 assert initial_bias.shape == (out_channels,)
155 if initial_bias is None:
156 initial_bias = 0
157 bias_initializer = initializers._get_initializer(initial_bias)
158 self.b = variable.Parameter(bias_initializer, out_channels)
159
160 def _initialize_params(self, in_channels):
161 kh, kw = _pair(self.ksize)
162 if self.out_channels % self.groups != 0:
163 raise ValueError('the number of output channels must be'
164 'divisible by the number of groups')
165 if in_channels % self.groups != 0:
166 raise ValueError('the number of input channels must be'
167 'divisible by the number of groups')
168 W_shape = (in_channels, int(self.out_channels / self.groups), kh, kw)
169 self.W.initialize(W_shape)
170
171 def forward(self, x):
172 if self.W.array is None:
173 self._initialize_params(x.shape[1])
174 return deconvolution_2d.deconvolution_2d(
175 x, self.W, self.b, self.stride, self.pad, self.outsize,
176 groups=self.groups)
177
178
179 def _pair(x):
180 if hasattr(x, '__getitem__'):
181 return x
182 return x, x
183
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/links/connection/deconvolution_2d.py b/chainer/links/connection/deconvolution_2d.py
--- a/chainer/links/connection/deconvolution_2d.py
+++ b/chainer/links/connection/deconvolution_2d.py
@@ -10,7 +10,7 @@
class Deconvolution2D(link.Link):
- """__init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0, nobias=False, outsize=None, initialW=None, initial_bias=None, *, groups=1)
+ """__init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0, nobias=False, outsize=None, initialW=None, initial_bias=None, *, dilate=1, groups=1)
Two dimensional deconvolution function.
@@ -45,6 +45,9 @@
initial_bias (:ref:`initializer <initializer>`): Initializer to
initialize the bias. If ``None``, the bias will be initialized to
zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.
+ dilate (:class:`int` or :class:`tuple` of :class:`int` s):
+ Dilation factor of filter applications.
+ ``dilate=d`` and ``dilate=(d, d)`` are equivalent.
groups (int): The number of groups to use grouped deconvolution. The
default is one, where grouped deconvolution is not used.
@@ -125,8 +128,8 @@
**kwargs):
super(Deconvolution2D, self).__init__()
- groups, = argument.parse_kwargs(
- kwargs, ('groups', 1),
+ dilate, groups, = argument.parse_kwargs(
+ kwargs, ('dilate', 1), ('groups', 1),
deterministic="deterministic argument is not supported anymore. "
"Use chainer.using_config('cudnn_deterministic', value) "
"context where value is either `True` or `False`.")
@@ -137,6 +140,7 @@
self.ksize = ksize
self.stride = _pair(stride)
self.pad = _pair(pad)
+ self.dilate = _pair(dilate)
self.outsize = (None, None) if outsize is None else outsize
self.out_channels = out_channels
self.groups = int(groups)
@@ -173,7 +177,7 @@
self._initialize_params(x.shape[1])
return deconvolution_2d.deconvolution_2d(
x, self.W, self.b, self.stride, self.pad, self.outsize,
- groups=self.groups)
+ dilate=self.dilate, groups=self.groups)
def _pair(x):
| {"golden_diff": "diff --git a/chainer/links/connection/deconvolution_2d.py b/chainer/links/connection/deconvolution_2d.py\n--- a/chainer/links/connection/deconvolution_2d.py\n+++ b/chainer/links/connection/deconvolution_2d.py\n@@ -10,7 +10,7 @@\n \n class Deconvolution2D(link.Link):\n \n- \"\"\"__init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0, nobias=False, outsize=None, initialW=None, initial_bias=None, *, groups=1)\n+ \"\"\"__init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0, nobias=False, outsize=None, initialW=None, initial_bias=None, *, dilate=1, groups=1)\n \n Two dimensional deconvolution function.\n \n@@ -45,6 +45,9 @@\n initial_bias (:ref:`initializer <initializer>`): Initializer to\n initialize the bias. If ``None``, the bias will be initialized to\n zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.\n+ dilate (:class:`int` or :class:`tuple` of :class:`int` s):\n+ Dilation factor of filter applications.\n+ ``dilate=d`` and ``dilate=(d, d)`` are equivalent.\n groups (int): The number of groups to use grouped deconvolution. The\n default is one, where grouped deconvolution is not used.\n \n@@ -125,8 +128,8 @@\n **kwargs):\n super(Deconvolution2D, self).__init__()\n \n- groups, = argument.parse_kwargs(\n- kwargs, ('groups', 1),\n+ dilate, groups, = argument.parse_kwargs(\n+ kwargs, ('dilate', 1), ('groups', 1),\n deterministic=\"deterministic argument is not supported anymore. \"\n \"Use chainer.using_config('cudnn_deterministic', value) \"\n \"context where value is either `True` or `False`.\")\n@@ -137,6 +140,7 @@\n self.ksize = ksize\n self.stride = _pair(stride)\n self.pad = _pair(pad)\n+ self.dilate = _pair(dilate)\n self.outsize = (None, None) if outsize is None else outsize\n self.out_channels = out_channels\n self.groups = int(groups)\n@@ -173,7 +177,7 @@\n self._initialize_params(x.shape[1])\n return deconvolution_2d.deconvolution_2d(\n x, self.W, self.b, self.stride, self.pad, self.outsize,\n- groups=self.groups)\n+ dilate=self.dilate, groups=self.groups)\n \n \n def _pair(x):\n", "issue": "`L.Deconvolution2D` lacks `dilate` option\nalthough `F.deconvolution_2d` already supports it.\n", "before_files": [{"content": "import numpy\n\nfrom chainer.backends import cuda\nfrom chainer.functions.connection import deconvolution_2d\nfrom chainer import initializers\nfrom chainer import link\nfrom chainer.utils import argument\nfrom chainer import variable\n\n\nclass Deconvolution2D(link.Link):\n\n \"\"\"__init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0, nobias=False, outsize=None, initialW=None, initial_bias=None, *, groups=1)\n\n Two dimensional deconvolution function.\n\n This link wraps the :func:`~chainer.functions.deconvolution_2d` function\n and holds the filter weight and bias vector as parameters.\n\n Deconvolution links can use a feature of cuDNN called autotuning, which\n selects the most efficient CNN algorithm for images of fixed-size, \n can provide a significant performance boost for fixed neural nets.\n To enable, set `chainer.using_config('autotune', True)`\n\n Args:\n in_channels (int or None): Number of channels of input arrays.\n If ``None``, parameter initialization will be deferred until the\n first forward data pass at which time the size will be determined.\n out_channels (int): Number of channels of output arrays.\n ksize (int or pair of ints): Size of filters (a.k.a. 
kernels).\n ``ksize=k`` and ``ksize=(k, k)`` are equivalent.\n stride (int or pair of ints): Stride of filter applications.\n ``stride=s`` and ``stride=(s, s)`` are equivalent.\n pad (int or pair of ints): Spatial padding width for input arrays.\n ``pad=p`` and ``pad=(p, p)`` are equivalent.\n nobias (bool): If ``True``, then this function does not use the bias\n term.\n outsize (tuple): Expected output size of deconvolutional operation.\n It should be pair of height and width :math:`(out_H, out_W)`.\n Default value is ``None`` and the outsize is estimated by\n input size, stride and pad.\n initialW (:ref:`initializer <initializer>`): Initializer to\n initialize the weight. When it is :class:`numpy.ndarray`,\n its ``ndim`` should be 4.\n initial_bias (:ref:`initializer <initializer>`): Initializer to\n initialize the bias. If ``None``, the bias will be initialized to\n zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.\n groups (int): The number of groups to use grouped deconvolution. The\n default is one, where grouped deconvolution is not used.\n\n The filter weight has four dimensions :math:`(c_I, c_O, k_H, k_W)`\n which indicate the number of input channels, output channels,\n height and width of the kernels, respectively.\n The filter weight is initialized with i.i.d. Gaussian random samples, each\n of which has zero mean and deviation :math:`\\\\sqrt{1/(c_I k_H k_W)}` by\n default.\n\n The bias vector is of size :math:`c_O`.\n Its elements are initialized by ``bias`` argument.\n If ``nobias`` argument is set to True, then this function does not hold\n the bias parameter.\n\n The output of this function can be non-deterministic when it uses cuDNN.\n If ``chainer.configuration.config.cudnn_deterministic`` is ``True`` and\n cuDNN version is >= v3, it forces cuDNN to use a deterministic algorithm.\n\n .. seealso::\n See :func:`chainer.functions.deconvolution_2d` for the definition of\n two-dimensional convolution.\n\n .. seealso::\n See :func:`chainer.links.Convolution2D` for the examples of ways to\n give arguments to this link.\n\n .. admonition:: Example\n\n There are several ways to make a Deconvolution2D link.\n\n Let an input vector ``x`` be:\n\n >>> x = np.arange(1 * 3 * 10 * 10, dtype=np.float32).reshape(1, 3, 10, 10)\n\n 1. Give the first three arguments explicitly:\n\n In this case, all the other arguments are set to the default\n values.\n\n >>> l = L.Deconvolution2D(3, 7, 4)\n >>> y = l(x)\n >>> y.shape\n (1, 7, 13, 13)\n\n 2. Omit ``in_channels`` or fill it with ``None``:\n\n The below two cases are the same.\n\n >>> l = L.Deconvolution2D(7, 4)\n >>> y = l(x)\n >>> y.shape\n (1, 7, 13, 13)\n\n >>> l = L.Deconvolution2D(None, 7, 4)\n >>> y = l(x)\n >>> y.shape\n (1, 7, 13, 13)\n\n When you omit the first argument, you need to specify the other\n subsequent arguments from ``stride`` as keyword arguments. So the\n below two cases are the same.\n\n >>> l = L.Deconvolution2D(None, 7, 4, 2, 1)\n >>> y = l(x)\n >>> y.shape\n (1, 7, 20, 20)\n\n >>> l = L.Deconvolution2D(7, 4, stride=2, pad=1)\n >>> y = l(x)\n >>> y.shape\n (1, 7, 20, 20)\n\n \"\"\" # NOQA\n\n def __init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0,\n nobias=False, outsize=None, initialW=None, initial_bias=None,\n **kwargs):\n super(Deconvolution2D, self).__init__()\n\n groups, = argument.parse_kwargs(\n kwargs, ('groups', 1),\n deterministic=\"deterministic argument is not supported anymore. 
\"\n \"Use chainer.using_config('cudnn_deterministic', value) \"\n \"context where value is either `True` or `False`.\")\n\n if ksize is None:\n out_channels, ksize, in_channels = in_channels, out_channels, None\n\n self.ksize = ksize\n self.stride = _pair(stride)\n self.pad = _pair(pad)\n self.outsize = (None, None) if outsize is None else outsize\n self.out_channels = out_channels\n self.groups = int(groups)\n\n with self.init_scope():\n W_initializer = initializers._get_initializer(initialW)\n self.W = variable.Parameter(W_initializer)\n if in_channels is not None:\n self._initialize_params(in_channels)\n\n if nobias:\n self.b = None\n else:\n if isinstance(initial_bias, (numpy.ndarray, cuda.ndarray)):\n assert initial_bias.shape == (out_channels,)\n if initial_bias is None:\n initial_bias = 0\n bias_initializer = initializers._get_initializer(initial_bias)\n self.b = variable.Parameter(bias_initializer, out_channels)\n\n def _initialize_params(self, in_channels):\n kh, kw = _pair(self.ksize)\n if self.out_channels % self.groups != 0:\n raise ValueError('the number of output channels must be'\n 'divisible by the number of groups')\n if in_channels % self.groups != 0:\n raise ValueError('the number of input channels must be'\n 'divisible by the number of groups')\n W_shape = (in_channels, int(self.out_channels / self.groups), kh, kw)\n self.W.initialize(W_shape)\n\n def forward(self, x):\n if self.W.array is None:\n self._initialize_params(x.shape[1])\n return deconvolution_2d.deconvolution_2d(\n x, self.W, self.b, self.stride, self.pad, self.outsize,\n groups=self.groups)\n\n\ndef _pair(x):\n if hasattr(x, '__getitem__'):\n return x\n return x, x\n", "path": "chainer/links/connection/deconvolution_2d.py"}], "after_files": [{"content": "import numpy\n\nfrom chainer.backends import cuda\nfrom chainer.functions.connection import deconvolution_2d\nfrom chainer import initializers\nfrom chainer import link\nfrom chainer.utils import argument\nfrom chainer import variable\n\n\nclass Deconvolution2D(link.Link):\n\n \"\"\"__init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0, nobias=False, outsize=None, initialW=None, initial_bias=None, *, dilate=1, groups=1)\n\n Two dimensional deconvolution function.\n\n This link wraps the :func:`~chainer.functions.deconvolution_2d` function\n and holds the filter weight and bias vector as parameters.\n\n Deconvolution links can use a feature of cuDNN called autotuning, which\n selects the most efficient CNN algorithm for images of fixed-size, \n can provide a significant performance boost for fixed neural nets.\n To enable, set `chainer.using_config('autotune', True)`\n\n Args:\n in_channels (int or None): Number of channels of input arrays.\n If ``None``, parameter initialization will be deferred until the\n first forward data pass at which time the size will be determined.\n out_channels (int): Number of channels of output arrays.\n ksize (int or pair of ints): Size of filters (a.k.a. 
kernels).\n ``ksize=k`` and ``ksize=(k, k)`` are equivalent.\n stride (int or pair of ints): Stride of filter applications.\n ``stride=s`` and ``stride=(s, s)`` are equivalent.\n pad (int or pair of ints): Spatial padding width for input arrays.\n ``pad=p`` and ``pad=(p, p)`` are equivalent.\n nobias (bool): If ``True``, then this function does not use the bias\n term.\n outsize (tuple): Expected output size of deconvolutional operation.\n It should be pair of height and width :math:`(out_H, out_W)`.\n Default value is ``None`` and the outsize is estimated by\n input size, stride and pad.\n initialW (:ref:`initializer <initializer>`): Initializer to\n initialize the weight. When it is :class:`numpy.ndarray`,\n its ``ndim`` should be 4.\n initial_bias (:ref:`initializer <initializer>`): Initializer to\n initialize the bias. If ``None``, the bias will be initialized to\n zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.\n dilate (:class:`int` or :class:`tuple` of :class:`int` s):\n Dilation factor of filter applications.\n ``dilate=d`` and ``dilate=(d, d)`` are equivalent.\n groups (int): The number of groups to use grouped deconvolution. The\n default is one, where grouped deconvolution is not used.\n\n The filter weight has four dimensions :math:`(c_I, c_O, k_H, k_W)`\n which indicate the number of input channels, output channels,\n height and width of the kernels, respectively.\n The filter weight is initialized with i.i.d. Gaussian random samples, each\n of which has zero mean and deviation :math:`\\\\sqrt{1/(c_I k_H k_W)}` by\n default.\n\n The bias vector is of size :math:`c_O`.\n Its elements are initialized by ``bias`` argument.\n If ``nobias`` argument is set to True, then this function does not hold\n the bias parameter.\n\n The output of this function can be non-deterministic when it uses cuDNN.\n If ``chainer.configuration.config.cudnn_deterministic`` is ``True`` and\n cuDNN version is >= v3, it forces cuDNN to use a deterministic algorithm.\n\n .. seealso::\n See :func:`chainer.functions.deconvolution_2d` for the definition of\n two-dimensional convolution.\n\n .. seealso::\n See :func:`chainer.links.Convolution2D` for the examples of ways to\n give arguments to this link.\n\n .. admonition:: Example\n\n There are several ways to make a Deconvolution2D link.\n\n Let an input vector ``x`` be:\n\n >>> x = np.arange(1 * 3 * 10 * 10, dtype=np.float32).reshape(1, 3, 10, 10)\n\n 1. Give the first three arguments explicitly:\n\n In this case, all the other arguments are set to the default\n values.\n\n >>> l = L.Deconvolution2D(3, 7, 4)\n >>> y = l(x)\n >>> y.shape\n (1, 7, 13, 13)\n\n 2. Omit ``in_channels`` or fill it with ``None``:\n\n The below two cases are the same.\n\n >>> l = L.Deconvolution2D(7, 4)\n >>> y = l(x)\n >>> y.shape\n (1, 7, 13, 13)\n\n >>> l = L.Deconvolution2D(None, 7, 4)\n >>> y = l(x)\n >>> y.shape\n (1, 7, 13, 13)\n\n When you omit the first argument, you need to specify the other\n subsequent arguments from ``stride`` as keyword arguments. 
So the\n below two cases are the same.\n\n >>> l = L.Deconvolution2D(None, 7, 4, 2, 1)\n >>> y = l(x)\n >>> y.shape\n (1, 7, 20, 20)\n\n >>> l = L.Deconvolution2D(7, 4, stride=2, pad=1)\n >>> y = l(x)\n >>> y.shape\n (1, 7, 20, 20)\n\n \"\"\" # NOQA\n\n def __init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0,\n nobias=False, outsize=None, initialW=None, initial_bias=None,\n **kwargs):\n super(Deconvolution2D, self).__init__()\n\n dilate, groups, = argument.parse_kwargs(\n kwargs, ('dilate', 1), ('groups', 1),\n deterministic=\"deterministic argument is not supported anymore. \"\n \"Use chainer.using_config('cudnn_deterministic', value) \"\n \"context where value is either `True` or `False`.\")\n\n if ksize is None:\n out_channels, ksize, in_channels = in_channels, out_channels, None\n\n self.ksize = ksize\n self.stride = _pair(stride)\n self.pad = _pair(pad)\n self.dilate = _pair(dilate)\n self.outsize = (None, None) if outsize is None else outsize\n self.out_channels = out_channels\n self.groups = int(groups)\n\n with self.init_scope():\n W_initializer = initializers._get_initializer(initialW)\n self.W = variable.Parameter(W_initializer)\n if in_channels is not None:\n self._initialize_params(in_channels)\n\n if nobias:\n self.b = None\n else:\n if isinstance(initial_bias, (numpy.ndarray, cuda.ndarray)):\n assert initial_bias.shape == (out_channels,)\n if initial_bias is None:\n initial_bias = 0\n bias_initializer = initializers._get_initializer(initial_bias)\n self.b = variable.Parameter(bias_initializer, out_channels)\n\n def _initialize_params(self, in_channels):\n kh, kw = _pair(self.ksize)\n if self.out_channels % self.groups != 0:\n raise ValueError('the number of output channels must be'\n 'divisible by the number of groups')\n if in_channels % self.groups != 0:\n raise ValueError('the number of input channels must be'\n 'divisible by the number of groups')\n W_shape = (in_channels, int(self.out_channels / self.groups), kh, kw)\n self.W.initialize(W_shape)\n\n def forward(self, x):\n if self.W.array is None:\n self._initialize_params(x.shape[1])\n return deconvolution_2d.deconvolution_2d(\n x, self.W, self.b, self.stride, self.pad, self.outsize,\n dilate=self.dilate, groups=self.groups)\n\n\ndef _pair(x):\n if hasattr(x, '__getitem__'):\n return x\n return x, x\n", "path": "chainer/links/connection/deconvolution_2d.py"}]} | 2,549 | 635 |
gh_patches_debug_61252 | rasdani/github-patches | git_diff | spotify__luigi-880 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix external dynamic deps
Since running tasks communicate with worker via a queue, all dynamic dependencies that they yield must be serialized and then deserialized back. This doesn't work if a task has `run = NotImplemented`, since there was a specific check for that in Register for unclear reason.
This PR adds a test case to reproduce the issue and fixes it by removing the check.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `luigi/task_register.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright 2012-2015 Spotify AB
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 #
17 """
18 Define the centralized register of all :class:`~luigi.task.Task` classes.
19 """
20
21 import abc
22 try:
23 from collections import OrderedDict
24 except ImportError:
25 from ordereddict import OrderedDict
26
27 from luigi import six
28 import logging
29 logger = logging.getLogger('luigi-interface')
30
31
32 class TaskClassException(Exception):
33 pass
34
35
36 class Register(abc.ABCMeta):
37 """
38 The Metaclass of :py:class:`Task`.
39
40 Acts as a global registry of Tasks with the following properties:
41
42 1. Cache instances of objects so that eg. ``X(1, 2, 3)`` always returns the
43 same object.
44 2. Keep track of all subclasses of :py:class:`Task` and expose them.
45 """
46 __instance_cache = {}
47 _default_namespace = None
48 _reg = []
49 AMBIGUOUS_CLASS = object() # Placeholder denoting an error
50 """If this value is returned by :py:meth:`__get_reg` then there is an
51 ambiguous task name (two :py:class:`Task` have the same name). This denotes
52 an error."""
53
54 def __new__(metacls, classname, bases, classdict):
55 """
56 Custom class creation for namespacing.
57
58 Also register all subclasses.
59
60 Set the task namespace to whatever the currently declared namespace is.
61 """
62 if "task_namespace" not in classdict:
63 classdict["task_namespace"] = metacls._default_namespace
64
65 cls = super(Register, metacls).__new__(metacls, classname, bases, classdict)
66 metacls._reg.append(cls)
67
68 return cls
69
70 def __call__(cls, *args, **kwargs):
71 """
72 Custom class instantiation utilizing instance cache.
73
74 If a Task has already been instantiated with the same parameters,
75 the previous instance is returned to reduce number of object instances.
76 """
77 def instantiate():
78 return super(Register, cls).__call__(*args, **kwargs)
79
80 h = cls.__instance_cache
81
82 if h is None: # disabled
83 return instantiate()
84
85 params = cls.get_params()
86 param_values = cls.get_param_values(params, args, kwargs)
87
88 k = (cls, tuple(param_values))
89
90 try:
91 hash(k)
92 except TypeError:
93 logger.debug("Not all parameter values are hashable so instance isn't coming from the cache")
94 return instantiate() # unhashable types in parameters
95
96 if k not in h:
97 h[k] = instantiate()
98
99 return h[k]
100
101 @classmethod
102 def clear_instance_cache(cls):
103 """
104 Clear/Reset the instance cache.
105 """
106 cls.__instance_cache = {}
107
108 @classmethod
109 def disable_instance_cache(cls):
110 """
111 Disables the instance cache.
112 """
113 cls.__instance_cache = None
114
115 @property
116 def task_family(cls):
117 """
118 The task family for the given class.
119
120 If ``cls.task_namespace is None`` then it's the name of the class.
121 Otherwise, ``<task_namespace>.`` is prefixed to the class name.
122 """
123 if cls.task_namespace is None:
124 return cls.__name__
125 else:
126 return "%s.%s" % (cls.task_namespace, cls.__name__)
127
128 @classmethod
129 def __get_reg(cls):
130 """Return all of the registered classes.
131
132 :return: an ``collections.OrderedDict`` of task_family -> class
133 """
134 # We have to do this on-demand in case task names have changed later
135 # We return this in a topologically sorted list of inheritance: this is useful in some cases (#822)
136 reg = OrderedDict()
137 for cls in cls._reg:
138 if cls.run == NotImplemented:
139 continue
140 name = cls.task_family
141
142 if name in reg and reg[name] != cls and \
143 reg[name] != cls.AMBIGUOUS_CLASS and \
144 not issubclass(cls, reg[name]):
145 # Registering two different classes - this means we can't instantiate them by name
146 # The only exception is if one class is a subclass of the other. In that case, we
147 # instantiate the most-derived class (this fixes some issues with decorator wrappers).
148 reg[name] = cls.AMBIGUOUS_CLASS
149 else:
150 reg[name] = cls
151
152 return reg
153
154 @classmethod
155 def task_names(cls):
156 """
157 List of task names as strings
158 """
159 return sorted(cls.__get_reg().keys())
160
161 @classmethod
162 def tasks_str(cls):
163 """
164 Human-readable register contents dump.
165 """
166 return ','.join(cls.task_names())
167
168 @classmethod
169 def get_task_cls(cls, name):
170 """
171 Returns an unambiguous class or raises an exception.
172 """
173 task_cls = cls.__get_reg().get(name)
174 if not task_cls:
175 raise TaskClassException('Task %r not found. Candidates are: %s' % (name, cls.tasks_str()))
176
177 if task_cls == cls.AMBIGUOUS_CLASS:
178 raise TaskClassException('Task %r is ambiguous' % name)
179 return task_cls
180
181 @classmethod
182 def get_all_params(cls):
183 """
184 Compiles and returns all parameters for all :py:class:`Task`.
185
186 :return: a generator of tuples (TODO: we should make this more elegant)
187 """
188 for task_name, task_cls in six.iteritems(cls.__get_reg()):
189 if task_cls == cls.AMBIGUOUS_CLASS:
190 continue
191 for param_name, param_obj in task_cls.get_params():
192 yield task_name, (not task_cls.use_cmdline_section), param_name, param_obj
193
194
195 def load_task(module, task_name, params_str):
196 """
197 Imports task dynamically given a module and a task name.
198 """
199 if module is not None:
200 __import__(module)
201 task_cls = Register.get_task_cls(task_name)
202 return task_cls.from_str_params(params_str)
203
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/luigi/task_register.py b/luigi/task_register.py
--- a/luigi/task_register.py
+++ b/luigi/task_register.py
@@ -135,8 +135,6 @@
# We return this in a topologically sorted list of inheritance: this is useful in some cases (#822)
reg = OrderedDict()
for cls in cls._reg:
- if cls.run == NotImplemented:
- continue
name = cls.task_family
if name in reg and reg[name] != cls and \
| {"golden_diff": "diff --git a/luigi/task_register.py b/luigi/task_register.py\n--- a/luigi/task_register.py\n+++ b/luigi/task_register.py\n@@ -135,8 +135,6 @@\n # We return this in a topologically sorted list of inheritance: this is useful in some cases (#822)\n reg = OrderedDict()\n for cls in cls._reg:\n- if cls.run == NotImplemented:\n- continue\n name = cls.task_family\n \n if name in reg and reg[name] != cls and \\\n", "issue": "Fix external dynamic deps\nSince running tasks communicate with worker via a queue, all dynamic dependencies that they yield must be serialized and then deserialized back. This doesn't work if a task has `run = NotImplemented`, since there was a specific check for that in Register for unclear reason.\n\nThis PR adds a test case to reproduce the issue and fixes it by removing the check.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2012-2015 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nDefine the centralized register of all :class:`~luigi.task.Task` classes.\n\"\"\"\n\nimport abc\ntry:\n from collections import OrderedDict\nexcept ImportError:\n from ordereddict import OrderedDict\n\nfrom luigi import six\nimport logging\nlogger = logging.getLogger('luigi-interface')\n\n\nclass TaskClassException(Exception):\n pass\n\n\nclass Register(abc.ABCMeta):\n \"\"\"\n The Metaclass of :py:class:`Task`.\n\n Acts as a global registry of Tasks with the following properties:\n\n 1. Cache instances of objects so that eg. ``X(1, 2, 3)`` always returns the\n same object.\n 2. Keep track of all subclasses of :py:class:`Task` and expose them.\n \"\"\"\n __instance_cache = {}\n _default_namespace = None\n _reg = []\n AMBIGUOUS_CLASS = object() # Placeholder denoting an error\n \"\"\"If this value is returned by :py:meth:`__get_reg` then there is an\n ambiguous task name (two :py:class:`Task` have the same name). 
This denotes\n an error.\"\"\"\n\n def __new__(metacls, classname, bases, classdict):\n \"\"\"\n Custom class creation for namespacing.\n\n Also register all subclasses.\n\n Set the task namespace to whatever the currently declared namespace is.\n \"\"\"\n if \"task_namespace\" not in classdict:\n classdict[\"task_namespace\"] = metacls._default_namespace\n\n cls = super(Register, metacls).__new__(metacls, classname, bases, classdict)\n metacls._reg.append(cls)\n\n return cls\n\n def __call__(cls, *args, **kwargs):\n \"\"\"\n Custom class instantiation utilizing instance cache.\n\n If a Task has already been instantiated with the same parameters,\n the previous instance is returned to reduce number of object instances.\n \"\"\"\n def instantiate():\n return super(Register, cls).__call__(*args, **kwargs)\n\n h = cls.__instance_cache\n\n if h is None: # disabled\n return instantiate()\n\n params = cls.get_params()\n param_values = cls.get_param_values(params, args, kwargs)\n\n k = (cls, tuple(param_values))\n\n try:\n hash(k)\n except TypeError:\n logger.debug(\"Not all parameter values are hashable so instance isn't coming from the cache\")\n return instantiate() # unhashable types in parameters\n\n if k not in h:\n h[k] = instantiate()\n\n return h[k]\n\n @classmethod\n def clear_instance_cache(cls):\n \"\"\"\n Clear/Reset the instance cache.\n \"\"\"\n cls.__instance_cache = {}\n\n @classmethod\n def disable_instance_cache(cls):\n \"\"\"\n Disables the instance cache.\n \"\"\"\n cls.__instance_cache = None\n\n @property\n def task_family(cls):\n \"\"\"\n The task family for the given class.\n\n If ``cls.task_namespace is None`` then it's the name of the class.\n Otherwise, ``<task_namespace>.`` is prefixed to the class name.\n \"\"\"\n if cls.task_namespace is None:\n return cls.__name__\n else:\n return \"%s.%s\" % (cls.task_namespace, cls.__name__)\n\n @classmethod\n def __get_reg(cls):\n \"\"\"Return all of the registered classes.\n\n :return: an ``collections.OrderedDict`` of task_family -> class\n \"\"\"\n # We have to do this on-demand in case task names have changed later\n # We return this in a topologically sorted list of inheritance: this is useful in some cases (#822)\n reg = OrderedDict()\n for cls in cls._reg:\n if cls.run == NotImplemented:\n continue\n name = cls.task_family\n\n if name in reg and reg[name] != cls and \\\n reg[name] != cls.AMBIGUOUS_CLASS and \\\n not issubclass(cls, reg[name]):\n # Registering two different classes - this means we can't instantiate them by name\n # The only exception is if one class is a subclass of the other. In that case, we\n # instantiate the most-derived class (this fixes some issues with decorator wrappers).\n reg[name] = cls.AMBIGUOUS_CLASS\n else:\n reg[name] = cls\n\n return reg\n\n @classmethod\n def task_names(cls):\n \"\"\"\n List of task names as strings\n \"\"\"\n return sorted(cls.__get_reg().keys())\n\n @classmethod\n def tasks_str(cls):\n \"\"\"\n Human-readable register contents dump.\n \"\"\"\n return ','.join(cls.task_names())\n\n @classmethod\n def get_task_cls(cls, name):\n \"\"\"\n Returns an unambiguous class or raises an exception.\n \"\"\"\n task_cls = cls.__get_reg().get(name)\n if not task_cls:\n raise TaskClassException('Task %r not found. 
Candidates are: %s' % (name, cls.tasks_str()))\n\n if task_cls == cls.AMBIGUOUS_CLASS:\n raise TaskClassException('Task %r is ambiguous' % name)\n return task_cls\n\n @classmethod\n def get_all_params(cls):\n \"\"\"\n Compiles and returns all parameters for all :py:class:`Task`.\n\n :return: a generator of tuples (TODO: we should make this more elegant)\n \"\"\"\n for task_name, task_cls in six.iteritems(cls.__get_reg()):\n if task_cls == cls.AMBIGUOUS_CLASS:\n continue\n for param_name, param_obj in task_cls.get_params():\n yield task_name, (not task_cls.use_cmdline_section), param_name, param_obj\n\n\ndef load_task(module, task_name, params_str):\n \"\"\"\n Imports task dynamically given a module and a task name.\n \"\"\"\n if module is not None:\n __import__(module)\n task_cls = Register.get_task_cls(task_name)\n return task_cls.from_str_params(params_str)\n", "path": "luigi/task_register.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2012-2015 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nDefine the centralized register of all :class:`~luigi.task.Task` classes.\n\"\"\"\n\nimport abc\ntry:\n from collections import OrderedDict\nexcept ImportError:\n from ordereddict import OrderedDict\n\nfrom luigi import six\nimport logging\nlogger = logging.getLogger('luigi-interface')\n\n\nclass TaskClassException(Exception):\n pass\n\n\nclass Register(abc.ABCMeta):\n \"\"\"\n The Metaclass of :py:class:`Task`.\n\n Acts as a global registry of Tasks with the following properties:\n\n 1. Cache instances of objects so that eg. ``X(1, 2, 3)`` always returns the\n same object.\n 2. Keep track of all subclasses of :py:class:`Task` and expose them.\n \"\"\"\n __instance_cache = {}\n _default_namespace = None\n _reg = []\n AMBIGUOUS_CLASS = object() # Placeholder denoting an error\n \"\"\"If this value is returned by :py:meth:`__get_reg` then there is an\n ambiguous task name (two :py:class:`Task` have the same name). 
This denotes\n an error.\"\"\"\n\n def __new__(metacls, classname, bases, classdict):\n \"\"\"\n Custom class creation for namespacing.\n\n Also register all subclasses.\n\n Set the task namespace to whatever the currently declared namespace is.\n \"\"\"\n if \"task_namespace\" not in classdict:\n classdict[\"task_namespace\"] = metacls._default_namespace\n\n cls = super(Register, metacls).__new__(metacls, classname, bases, classdict)\n metacls._reg.append(cls)\n\n return cls\n\n def __call__(cls, *args, **kwargs):\n \"\"\"\n Custom class instantiation utilizing instance cache.\n\n If a Task has already been instantiated with the same parameters,\n the previous instance is returned to reduce number of object instances.\n \"\"\"\n def instantiate():\n return super(Register, cls).__call__(*args, **kwargs)\n\n h = cls.__instance_cache\n\n if h is None: # disabled\n return instantiate()\n\n params = cls.get_params()\n param_values = cls.get_param_values(params, args, kwargs)\n\n k = (cls, tuple(param_values))\n\n try:\n hash(k)\n except TypeError:\n logger.debug(\"Not all parameter values are hashable so instance isn't coming from the cache\")\n return instantiate() # unhashable types in parameters\n\n if k not in h:\n h[k] = instantiate()\n\n return h[k]\n\n @classmethod\n def clear_instance_cache(cls):\n \"\"\"\n Clear/Reset the instance cache.\n \"\"\"\n cls.__instance_cache = {}\n\n @classmethod\n def disable_instance_cache(cls):\n \"\"\"\n Disables the instance cache.\n \"\"\"\n cls.__instance_cache = None\n\n @property\n def task_family(cls):\n \"\"\"\n The task family for the given class.\n\n If ``cls.task_namespace is None`` then it's the name of the class.\n Otherwise, ``<task_namespace>.`` is prefixed to the class name.\n \"\"\"\n if cls.task_namespace is None:\n return cls.__name__\n else:\n return \"%s.%s\" % (cls.task_namespace, cls.__name__)\n\n @classmethod\n def __get_reg(cls):\n \"\"\"Return all of the registered classes.\n\n :return: an ``collections.OrderedDict`` of task_family -> class\n \"\"\"\n # We have to do this on-demand in case task names have changed later\n # We return this in a topologically sorted list of inheritance: this is useful in some cases (#822)\n reg = OrderedDict()\n for cls in cls._reg:\n name = cls.task_family\n\n if name in reg and reg[name] != cls and \\\n reg[name] != cls.AMBIGUOUS_CLASS and \\\n not issubclass(cls, reg[name]):\n # Registering two different classes - this means we can't instantiate them by name\n # The only exception is if one class is a subclass of the other. In that case, we\n # instantiate the most-derived class (this fixes some issues with decorator wrappers).\n reg[name] = cls.AMBIGUOUS_CLASS\n else:\n reg[name] = cls\n\n return reg\n\n @classmethod\n def task_names(cls):\n \"\"\"\n List of task names as strings\n \"\"\"\n return sorted(cls.__get_reg().keys())\n\n @classmethod\n def tasks_str(cls):\n \"\"\"\n Human-readable register contents dump.\n \"\"\"\n return ','.join(cls.task_names())\n\n @classmethod\n def get_task_cls(cls, name):\n \"\"\"\n Returns an unambiguous class or raises an exception.\n \"\"\"\n task_cls = cls.__get_reg().get(name)\n if not task_cls:\n raise TaskClassException('Task %r not found. 
Candidates are: %s' % (name, cls.tasks_str()))\n\n if task_cls == cls.AMBIGUOUS_CLASS:\n raise TaskClassException('Task %r is ambiguous' % name)\n return task_cls\n\n @classmethod\n def get_all_params(cls):\n \"\"\"\n Compiles and returns all parameters for all :py:class:`Task`.\n\n :return: a generator of tuples (TODO: we should make this more elegant)\n \"\"\"\n for task_name, task_cls in six.iteritems(cls.__get_reg()):\n if task_cls == cls.AMBIGUOUS_CLASS:\n continue\n for param_name, param_obj in task_cls.get_params():\n yield task_name, (not task_cls.use_cmdline_section), param_name, param_obj\n\n\ndef load_task(module, task_name, params_str):\n \"\"\"\n Imports task dynamically given a module and a task name.\n \"\"\"\n if module is not None:\n __import__(module)\n task_cls = Register.get_task_cls(task_name)\n return task_cls.from_str_params(params_str)\n", "path": "luigi/task_register.py"}]} | 2,263 | 120 |
gh_patches_debug_25585 | rasdani/github-patches | git_diff | talonhub__community-758 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve phrase history mechanism
instead of this:
https://github.com/knausj85/knausj_talon/blob/3e57e0165257cf07b0e21880d44a91e79cb3ef16/code/history.py#L19-L29
consider something like this:
```py
def on_phrase(j):
global history
words = j.get('text')
if words:
text = ' '.join(words)
history.append(text)
history = history[-setting_command_history_size.get() :]
```
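
(As an aside, not part of the proposal above: the same bounded-history behaviour could also be expressed with a `collections.deque`. A minimal sketch, assuming a fixed history size — `deque(maxlen=...)` cannot track a setting that changes at runtime the way the list-plus-slice version can, which is why the proposal above is arguably the better fit:)

```py
# Illustrative alternative only — maxlen is fixed at construction time,
# so the list + slice approach above remains preferable for a live setting.
from collections import deque

history = deque(maxlen=50)  # assumed fixed size for this sketch

def on_phrase(j):
    words = j.get('text')
    if words:
        history.append(' '.join(words))
```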
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `code/history.py`
Content:
```
1 from talon import imgui, Module, speech_system, actions, app
2
3 # We keep command_history_size lines of history, but by default display only
4 # command_history_display of them.
5 mod = Module()
6 setting_command_history_size = mod.setting("command_history_size", int, default=50)
7 setting_command_history_display = mod.setting(
8 "command_history_display", int, default=10
9 )
10
11 hist_more = False
12 history = []
13
14
15 def parse_phrase(word_list):
16 return " ".join(word.split("\\")[0] for word in word_list)
17
18
19 def on_phrase(j):
20 global history
21
22 try:
23 val = parse_phrase(getattr(j["parsed"], "_unmapped", j["phrase"]))
24 except:
25 val = parse_phrase(j["phrase"])
26
27 if val != "":
28 history.append(val)
29 history = history[-setting_command_history_size.get() :]
30
31
32 # todo: dynamic rect?
33 @imgui.open(y=0)
34 def gui(gui: imgui.GUI):
35 global history
36 gui.text("Command History")
37 gui.line()
38 text = (
39 history[:] if hist_more else history[-setting_command_history_display.get() :]
40 )
41 for line in text:
42 gui.text(line)
43
44 gui.spacer()
45 if gui.button("Command history close"):
46 actions.user.history_disable()
47
48
49 speech_system.register("phrase", on_phrase)
50
51
52 @mod.action_class
53 class Actions:
54 def history_toggle():
55 """Toggles viewing the history"""
56 if gui.showing:
57 gui.hide()
58 else:
59 gui.show()
60
61 def history_enable():
62 """Enables the history"""
63 gui.show()
64
65 def history_disable():
66 """Disables the history"""
67 gui.hide()
68
69 def history_clear():
70 """Clear the history"""
71 global history
72 history = []
73
74 def history_more():
75 """Show more history"""
76 global hist_more
77 hist_more = True
78
79 def history_less():
80 """Show less history"""
81 global hist_more
82 hist_more = False
83
84 def history_get(number: int):
85 """returns the history entry at the specified index"""
86 num = (0 - number) - 1
87 return history[num]
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/code/history.py b/code/history.py
--- a/code/history.py
+++ b/code/history.py
@@ -1,3 +1,4 @@
+from typing import Optional
from talon import imgui, Module, speech_system, actions, app
# We keep command_history_size lines of history, but by default display only
@@ -12,20 +13,15 @@
history = []
-def parse_phrase(word_list):
- return " ".join(word.split("\\")[0] for word in word_list)
-
-
def on_phrase(j):
global history
- try:
- val = parse_phrase(getattr(j["parsed"], "_unmapped", j["phrase"]))
- except:
- val = parse_phrase(j["phrase"])
+ words = j.get('text')
+
+ text = actions.user.history_transform_phrase_text(words)
- if val != "":
- history.append(val)
+ if text is not None:
+ history.append(text)
history = history[-setting_command_history_size.get() :]
@@ -85,3 +81,11 @@
"""returns the history entry at the specified index"""
num = (0 - number) - 1
return history[num]
+
+ def history_transform_phrase_text(words: list[str]) -> Optional[str]:
+ """Transforms phrase text for presentation in history. Return `None` to omit from history"""
+
+ if not actions.speech.enabled():
+ return None
+
+ return ' '.join(words) if words else None
\ No newline at end of file
| {"golden_diff": "diff --git a/code/history.py b/code/history.py\n--- a/code/history.py\n+++ b/code/history.py\n@@ -1,3 +1,4 @@\n+from typing import Optional\n from talon import imgui, Module, speech_system, actions, app\n \n # We keep command_history_size lines of history, but by default display only\n@@ -12,20 +13,15 @@\n history = []\n \n \n-def parse_phrase(word_list):\n- return \" \".join(word.split(\"\\\\\")[0] for word in word_list)\n-\n-\n def on_phrase(j):\n global history\n \n- try:\n- val = parse_phrase(getattr(j[\"parsed\"], \"_unmapped\", j[\"phrase\"]))\n- except:\n- val = parse_phrase(j[\"phrase\"])\n+ words = j.get('text')\n+\n+ text = actions.user.history_transform_phrase_text(words)\n \n- if val != \"\":\n- history.append(val)\n+ if text is not None:\n+ history.append(text)\n history = history[-setting_command_history_size.get() :]\n \n \n@@ -85,3 +81,11 @@\n \"\"\"returns the history entry at the specified index\"\"\"\n num = (0 - number) - 1\n return history[num]\n+\n+ def history_transform_phrase_text(words: list[str]) -> Optional[str]:\n+ \"\"\"Transforms phrase text for presentation in history. Return `None` to omit from history\"\"\"\n+\n+ if not actions.speech.enabled():\n+ return None\n+\n+ return ' '.join(words) if words else None\n\\ No newline at end of file\n", "issue": "Improve phrase history mechanism\ninstead of this:\r\n\r\nhttps://github.com/knausj85/knausj_talon/blob/3e57e0165257cf07b0e21880d44a91e79cb3ef16/code/history.py#L19-L29\r\n\r\nconsider something like this:\r\n\r\n```py\r\ndef on_phrase(j):\r\n global history\r\n words = j.get('text')\r\n if words:\r\n text = ' '.join(words)\r\n history.append(text)\r\n history = history[-setting_command_history_size.get() :]\r\n```\n", "before_files": [{"content": "from talon import imgui, Module, speech_system, actions, app\n\n# We keep command_history_size lines of history, but by default display only\n# command_history_display of them.\nmod = Module()\nsetting_command_history_size = mod.setting(\"command_history_size\", int, default=50)\nsetting_command_history_display = mod.setting(\n \"command_history_display\", int, default=10\n)\n\nhist_more = False\nhistory = []\n\n\ndef parse_phrase(word_list):\n return \" \".join(word.split(\"\\\\\")[0] for word in word_list)\n\n\ndef on_phrase(j):\n global history\n\n try:\n val = parse_phrase(getattr(j[\"parsed\"], \"_unmapped\", j[\"phrase\"]))\n except:\n val = parse_phrase(j[\"phrase\"])\n\n if val != \"\":\n history.append(val)\n history = history[-setting_command_history_size.get() :]\n\n\n# todo: dynamic rect?\[email protected](y=0)\ndef gui(gui: imgui.GUI):\n global history\n gui.text(\"Command History\")\n gui.line()\n text = (\n history[:] if hist_more else history[-setting_command_history_display.get() :]\n )\n for line in text:\n gui.text(line)\n\n gui.spacer()\n if gui.button(\"Command history close\"):\n actions.user.history_disable()\n\n\nspeech_system.register(\"phrase\", on_phrase)\n\n\[email protected]_class\nclass Actions:\n def history_toggle():\n \"\"\"Toggles viewing the history\"\"\"\n if gui.showing:\n gui.hide()\n else:\n gui.show()\n\n def history_enable():\n \"\"\"Enables the history\"\"\"\n gui.show()\n\n def history_disable():\n \"\"\"Disables the history\"\"\"\n gui.hide()\n\n def history_clear():\n \"\"\"Clear the history\"\"\"\n global history\n history = []\n\n def history_more():\n \"\"\"Show more history\"\"\"\n global hist_more\n hist_more = True\n\n def history_less():\n \"\"\"Show less history\"\"\"\n global hist_more\n 
hist_more = False\n\n def history_get(number: int):\n \"\"\"returns the history entry at the specified index\"\"\"\n num = (0 - number) - 1\n return history[num]\n", "path": "code/history.py"}], "after_files": [{"content": "from typing import Optional\nfrom talon import imgui, Module, speech_system, actions, app\n\n# We keep command_history_size lines of history, but by default display only\n# command_history_display of them.\nmod = Module()\nsetting_command_history_size = mod.setting(\"command_history_size\", int, default=50)\nsetting_command_history_display = mod.setting(\n \"command_history_display\", int, default=10\n)\n\nhist_more = False\nhistory = []\n\n\ndef on_phrase(j):\n global history\n\n words = j.get('text')\n\n text = actions.user.history_transform_phrase_text(words)\n\n if text is not None:\n history.append(text)\n history = history[-setting_command_history_size.get() :]\n\n\n# todo: dynamic rect?\[email protected](y=0)\ndef gui(gui: imgui.GUI):\n global history\n gui.text(\"Command History\")\n gui.line()\n text = (\n history[:] if hist_more else history[-setting_command_history_display.get() :]\n )\n for line in text:\n gui.text(line)\n\n gui.spacer()\n if gui.button(\"Command history close\"):\n actions.user.history_disable()\n\n\nspeech_system.register(\"phrase\", on_phrase)\n\n\[email protected]_class\nclass Actions:\n def history_toggle():\n \"\"\"Toggles viewing the history\"\"\"\n if gui.showing:\n gui.hide()\n else:\n gui.show()\n\n def history_enable():\n \"\"\"Enables the history\"\"\"\n gui.show()\n\n def history_disable():\n \"\"\"Disables the history\"\"\"\n gui.hide()\n\n def history_clear():\n \"\"\"Clear the history\"\"\"\n global history\n history = []\n\n def history_more():\n \"\"\"Show more history\"\"\"\n global hist_more\n hist_more = True\n\n def history_less():\n \"\"\"Show less history\"\"\"\n global hist_more\n hist_more = False\n\n def history_get(number: int):\n \"\"\"returns the history entry at the specified index\"\"\"\n num = (0 - number) - 1\n return history[num]\n\n def history_transform_phrase_text(words: list[str]) -> Optional[str]:\n \"\"\"Transforms phrase text for presentation in history. Return `None` to omit from history\"\"\"\n\n if not actions.speech.enabled():\n return None\n\n return ' '.join(words) if words else None", "path": "code/history.py"}]} | 1,026 | 341 |
gh_patches_debug_16299 | rasdani/github-patches | git_diff | sopel-irc__sopel-1347 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
search: Unicode URLs get mojibake'd under Python 2
Behold, the curse of bad Unicode handling:
````
23:33:27 <~dgw> ;g grandorder.wiki chulainn alter
23:33:28 <&Kaede> dgw: https://grandorder.wiki/CÃº_Chulainn_(Alter)
````
Versus the glory of Python 3:
````
01:00:10 <~dgw> .g grandorder.wiki chulainn alter
01:00:11 <Sopel> dgw: https://grandorder.wiki/Cú_Chulainn_(Alter)
````
Thanks to @prahulkgp for triggering this error, so I could find it. I've already added a test case using this exact query locally, both to verify the fix and to see that it doesn't break later. (Here's hoping it doesn't start bugging out Travis builds like the existing Bing tests sometimes do.)
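
For context, a minimal sketch of the Python 2 behaviour believed to be at fault (illustrative only; the URL fragment is taken from the example above):

```python
# Python 2 only. unquote() on a *unicode* string maps each %XX escape to a
# single code point, so the two UTF-8 bytes of "ú" (%C3 %BA) come back as
# U+00C3 U+00BA — the "Ãº" mojibake — instead of being decoded as UTF-8.
from urllib import unquote

encoded = 'C%C3%BA_Chulainn'

print(unquote(u'' + encoded))            # u'C\xc3\xba_Chulainn' -> "CÃº_Chulainn" (mojibake)
print(unquote(encoded).decode('utf-8'))  # u'C\xfa_Chulainn'     -> "Cú_Chulainn"  (correct)
```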
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sopel/modules/search.py`
Content:
```
1 # coding=utf-8
2 # Copyright 2008-9, Sean B. Palmer, inamidst.com
3 # Copyright 2012, Elsie Powell, embolalia.com
4 # Licensed under the Eiffel Forum License 2.
5 from __future__ import unicode_literals, absolute_import, print_function, division
6
7 import re
8 from sopel import web
9 from sopel.module import commands, example
10 import requests
11 import xmltodict
12 import sys
13
14 if sys.version_info.major < 3:
15 from urllib import quote_plus, unquote
16 else:
17 from urllib.parse import quote_plus, unquote
18
19
20 def formatnumber(n):
21 """Format a number with beautiful commas."""
22 parts = list(str(n))
23 for i in range((len(parts) - 3), 0, -3):
24 parts.insert(i, ',')
25 return ''.join(parts)
26
27
28 r_bing = re.compile(r'<h2(?: class=" b_topTitle")?><a href="([^"]+)"')
29
30
31 def bing_search(query, lang='en-US'):
32 base = 'https://www.bing.com/search?mkt=%s&q=' % lang
33 bytes = requests.get(base + query).text
34 m = r_bing.search(bytes)
35 if m:
36 return m.group(1)
37
38
39 r_duck = re.compile(r'nofollow" class="[^"]+" href="(?!(?:https?:\/\/r\.search\.yahoo)|(?:https?:\/\/duckduckgo\.com\/y\.js)(?:\/l\/\?kh=-1&uddg=))(.*?)">')
40
41
42 def duck_search(query):
43 query = query.replace('!', '')
44 uri = 'https://duckduckgo.com/html/?q=%s&kl=us-en' % query
45 bytes = requests.get(uri, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}).text
46 if 'web-result' in bytes: # filter out the adds on top of the page
47 bytes = bytes.split('web-result')[1]
48 m = r_duck.search(bytes)
49 if m:
50 unquoted_m = unquote(m.group(1))
51 return web.decode(unquoted_m)
52
53
54 # Alias google_search to duck_search
55 google_search = duck_search
56
57
58 def duck_api(query):
59 if '!bang' in query.lower():
60 return 'https://duckduckgo.com/bang.html'
61
62 # This fixes issue #885 (https://github.com/sopel-irc/sopel/issues/885)
63 # It seems that duckduckgo api redirects to its Instant answer API html page
64 # if the query constains special charactares that aren't urlencoded.
65 # So in order to always get a JSON response back the query is urlencoded
66 query = quote_plus(query)
67 uri = 'https://api.duckduckgo.com/?q=%s&format=json&no_html=1&no_redirect=1' % query
68 try:
69 results = requests.get(uri).json()
70 except ValueError:
71 return None
72 if results['Redirect']:
73 return results['Redirect']
74 else:
75 return None
76
77
78 @commands('duck', 'ddg', 'g')
79 @example('.duck sopel irc bot', r'https?:\/\/sopel\.chat\/?', re=True)
80 def duck(bot, trigger):
81 """Queries Duck Duck Go for the specified input."""
82 query = trigger.group(2)
83 if not query:
84 return bot.reply('.ddg what?')
85
86 # If the API gives us something, say it and stop
87 result = duck_api(query)
88 if result:
89 bot.reply(result)
90 return
91
92 # Otherwise, look it up on the HTMl version
93 uri = duck_search(query)
94
95 if uri:
96 bot.reply(uri)
97 if 'last_seen_url' in bot.memory:
98 bot.memory['last_seen_url'][trigger.sender] = uri
99 else:
100 bot.reply("No results found for '%s'." % query)
101
102
103 @commands('bing')
104 @example('.bing sopel irc bot', r'https?:\/\/sopel\.chat\/?', re=True)
105 def bing(bot, trigger):
106 """Queries Bing for the specified input."""
107 if not trigger.group(2):
108 return bot.reply('.bing what?')
109 query = trigger.group(2)
110 result = bing_search(query)
111 if result:
112 bot.say(result)
113 else:
114 bot.reply("No results found for '%s'." % query)
115
116
117 @commands('search')
118 @example('.search sopel irc bot', r'(https?:\/\/sopel\.chat\/? \(b, d\)|https?:\/\/sopel\.chat\/? \(b\), https?:\/\/sopel\.chat\/? \(d\))', re=True)
119 def search(bot, trigger):
120 """Searches Bing and Duck Duck Go."""
121 if not trigger.group(2):
122 return bot.reply('.search for what?')
123 query = trigger.group(2)
124 bu = bing_search(query) or '-'
125 du = duck_search(query) or '-'
126
127 if bu == du:
128 result = '%s (b, d)' % bu
129 else:
130 if len(bu) > 150:
131 bu = '(extremely long link)'
132 if len(du) > 150:
133 du = '(extremely long link)'
134 result = '%s (b), %s (d)' % (bu, du)
135
136 bot.reply(result)
137
138
139 @commands('suggest')
140 @example('.suggest wikip', 'wikipedia')
141 @example('.suggest ', 'No query term.')
142 @example('.suggest lkashdfiauwgeaef', 'Sorry, no result.')
143 def suggest(bot, trigger):
144 """Suggest terms starting with given input"""
145 if not trigger.group(2):
146 return bot.reply("No query term.")
147 query = trigger.group(2)
148 # Using Google isn't necessarily ideal, but at most they'll be able to build
149 # a composite profile of all users on a given instance, not a profile of any
150 # single user. This can be switched out as soon as someone finds (or builds)
151 # an alternative suggestion API.
152 uri = 'https://suggestqueries.google.com/complete/search?output=toolbar&hl=en&q='
153 answer = xmltodict.parse(requests.get(uri + query.replace('+', '%2B')).text)['toplevel']
154 try:
155 answer = answer['CompleteSuggestion'][0]['suggestion']['@data']
156 except TypeError:
157 answer = None
158 if answer:
159 bot.say(answer)
160 else:
161 bot.reply('Sorry, no result.')
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sopel/modules/search.py b/sopel/modules/search.py
--- a/sopel/modules/search.py
+++ b/sopel/modules/search.py
@@ -12,7 +12,8 @@
import sys
if sys.version_info.major < 3:
- from urllib import quote_plus, unquote
+ from urllib import quote_plus, unquote as _unquote
+ unquote = lambda s: _unquote(s.encode('utf-8')).decode('utf-8')
else:
from urllib.parse import quote_plus, unquote
@@ -76,6 +77,9 @@
@commands('duck', 'ddg', 'g')
+# test for bad Unicode handling in py2
+@example('.duck grandorder.wiki chulainn alter', 'https://grandorder.wiki/Cú_Chulainn_(Alter)')
+# the last example is what .help displays
@example('.duck sopel irc bot', r'https?:\/\/sopel\.chat\/?', re=True)
def duck(bot, trigger):
"""Queries Duck Duck Go for the specified input."""
| {"golden_diff": "diff --git a/sopel/modules/search.py b/sopel/modules/search.py\n--- a/sopel/modules/search.py\n+++ b/sopel/modules/search.py\n@@ -12,7 +12,8 @@\n import sys\n \n if sys.version_info.major < 3:\n- from urllib import quote_plus, unquote\n+ from urllib import quote_plus, unquote as _unquote\n+ unquote = lambda s: _unquote(s.encode('utf-8')).decode('utf-8')\n else:\n from urllib.parse import quote_plus, unquote\n \n@@ -76,6 +77,9 @@\n \n \n @commands('duck', 'ddg', 'g')\n+# test for bad Unicode handling in py2\n+@example('.duck grandorder.wiki chulainn alter', 'https://grandorder.wiki/C\u00fa_Chulainn_(Alter)')\n+# the last example is what .help displays\n @example('.duck sopel irc bot', r'https?:\\/\\/sopel\\.chat\\/?', re=True)\n def duck(bot, trigger):\n \"\"\"Queries Duck Duck Go for the specified input.\"\"\"\n", "issue": "search: Unicode URLs get mojibake'd under Python 2\nBehold, the curse of bad Unicode handling:\r\n\r\n````\r\n23:33:27 <~dgw> ;g grandorder.wiki chulainn alter\r\n23:33:28 <&Kaede> dgw: https://grandorder.wiki/C\u00c3\u00ba_Chulainn_(Alter)\r\n````\r\n\r\nVersus the glory of Python 3:\r\n\r\n````\r\n01:00:10 <~dgw> .g grandorder.wiki chulainn alter\r\n01:00:11 <Sopel> dgw: https://grandorder.wiki/C\u00fa_Chulainn_(Alter)\r\n````\r\n\r\nThanks to @prahulkgp for triggering this error, so I could find it. I've already added a test case using this exact query locally, both to verify the fix and to see that it doesn't break later. (Here's hoping it doesn't start bugging out Travis builds like the existing Bing tests sometimes do.)\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2008-9, Sean B. Palmer, inamidst.com\n# Copyright 2012, Elsie Powell, embolalia.com\n# Licensed under the Eiffel Forum License 2.\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport re\nfrom sopel import web\nfrom sopel.module import commands, example\nimport requests\nimport xmltodict\nimport sys\n\nif sys.version_info.major < 3:\n from urllib import quote_plus, unquote\nelse:\n from urllib.parse import quote_plus, unquote\n\n\ndef formatnumber(n):\n \"\"\"Format a number with beautiful commas.\"\"\"\n parts = list(str(n))\n for i in range((len(parts) - 3), 0, -3):\n parts.insert(i, ',')\n return ''.join(parts)\n\n\nr_bing = re.compile(r'<h2(?: class=\" b_topTitle\")?><a href=\"([^\"]+)\"')\n\n\ndef bing_search(query, lang='en-US'):\n base = 'https://www.bing.com/search?mkt=%s&q=' % lang\n bytes = requests.get(base + query).text\n m = r_bing.search(bytes)\n if m:\n return m.group(1)\n\n\nr_duck = re.compile(r'nofollow\" class=\"[^\"]+\" href=\"(?!(?:https?:\\/\\/r\\.search\\.yahoo)|(?:https?:\\/\\/duckduckgo\\.com\\/y\\.js)(?:\\/l\\/\\?kh=-1&uddg=))(.*?)\">')\n\n\ndef duck_search(query):\n query = query.replace('!', '')\n uri = 'https://duckduckgo.com/html/?q=%s&kl=us-en' % query\n bytes = requests.get(uri, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}).text\n if 'web-result' in bytes: # filter out the adds on top of the page\n bytes = bytes.split('web-result')[1]\n m = r_duck.search(bytes)\n if m:\n unquoted_m = unquote(m.group(1))\n return web.decode(unquoted_m)\n\n\n# Alias google_search to duck_search\ngoogle_search = duck_search\n\n\ndef duck_api(query):\n if '!bang' in query.lower():\n return 'https://duckduckgo.com/bang.html'\n\n # This fixes issue #885 (https://github.com/sopel-irc/sopel/issues/885)\n # It seems that 
duckduckgo api redirects to its Instant answer API html page\n # if the query constains special charactares that aren't urlencoded.\n # So in order to always get a JSON response back the query is urlencoded\n query = quote_plus(query)\n uri = 'https://api.duckduckgo.com/?q=%s&format=json&no_html=1&no_redirect=1' % query\n try:\n results = requests.get(uri).json()\n except ValueError:\n return None\n if results['Redirect']:\n return results['Redirect']\n else:\n return None\n\n\n@commands('duck', 'ddg', 'g')\n@example('.duck sopel irc bot', r'https?:\\/\\/sopel\\.chat\\/?', re=True)\ndef duck(bot, trigger):\n \"\"\"Queries Duck Duck Go for the specified input.\"\"\"\n query = trigger.group(2)\n if not query:\n return bot.reply('.ddg what?')\n\n # If the API gives us something, say it and stop\n result = duck_api(query)\n if result:\n bot.reply(result)\n return\n\n # Otherwise, look it up on the HTMl version\n uri = duck_search(query)\n\n if uri:\n bot.reply(uri)\n if 'last_seen_url' in bot.memory:\n bot.memory['last_seen_url'][trigger.sender] = uri\n else:\n bot.reply(\"No results found for '%s'.\" % query)\n\n\n@commands('bing')\n@example('.bing sopel irc bot', r'https?:\\/\\/sopel\\.chat\\/?', re=True)\ndef bing(bot, trigger):\n \"\"\"Queries Bing for the specified input.\"\"\"\n if not trigger.group(2):\n return bot.reply('.bing what?')\n query = trigger.group(2)\n result = bing_search(query)\n if result:\n bot.say(result)\n else:\n bot.reply(\"No results found for '%s'.\" % query)\n\n\n@commands('search')\n@example('.search sopel irc bot', r'(https?:\\/\\/sopel\\.chat\\/? \\(b, d\\)|https?:\\/\\/sopel\\.chat\\/? \\(b\\), https?:\\/\\/sopel\\.chat\\/? \\(d\\))', re=True)\ndef search(bot, trigger):\n \"\"\"Searches Bing and Duck Duck Go.\"\"\"\n if not trigger.group(2):\n return bot.reply('.search for what?')\n query = trigger.group(2)\n bu = bing_search(query) or '-'\n du = duck_search(query) or '-'\n\n if bu == du:\n result = '%s (b, d)' % bu\n else:\n if len(bu) > 150:\n bu = '(extremely long link)'\n if len(du) > 150:\n du = '(extremely long link)'\n result = '%s (b), %s (d)' % (bu, du)\n\n bot.reply(result)\n\n\n@commands('suggest')\n@example('.suggest wikip', 'wikipedia')\n@example('.suggest ', 'No query term.')\n@example('.suggest lkashdfiauwgeaef', 'Sorry, no result.')\ndef suggest(bot, trigger):\n \"\"\"Suggest terms starting with given input\"\"\"\n if not trigger.group(2):\n return bot.reply(\"No query term.\")\n query = trigger.group(2)\n # Using Google isn't necessarily ideal, but at most they'll be able to build\n # a composite profile of all users on a given instance, not a profile of any\n # single user. This can be switched out as soon as someone finds (or builds)\n # an alternative suggestion API.\n uri = 'https://suggestqueries.google.com/complete/search?output=toolbar&hl=en&q='\n answer = xmltodict.parse(requests.get(uri + query.replace('+', '%2B')).text)['toplevel']\n try:\n answer = answer['CompleteSuggestion'][0]['suggestion']['@data']\n except TypeError:\n answer = None\n if answer:\n bot.say(answer)\n else:\n bot.reply('Sorry, no result.')\n", "path": "sopel/modules/search.py"}], "after_files": [{"content": "# coding=utf-8\n# Copyright 2008-9, Sean B. 
Palmer, inamidst.com\n# Copyright 2012, Elsie Powell, embolalia.com\n# Licensed under the Eiffel Forum License 2.\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport re\nfrom sopel import web\nfrom sopel.module import commands, example\nimport requests\nimport xmltodict\nimport sys\n\nif sys.version_info.major < 3:\n from urllib import quote_plus, unquote as _unquote\n unquote = lambda s: _unquote(s.encode('utf-8')).decode('utf-8')\nelse:\n from urllib.parse import quote_plus, unquote\n\n\ndef formatnumber(n):\n \"\"\"Format a number with beautiful commas.\"\"\"\n parts = list(str(n))\n for i in range((len(parts) - 3), 0, -3):\n parts.insert(i, ',')\n return ''.join(parts)\n\n\nr_bing = re.compile(r'<h2(?: class=\" b_topTitle\")?><a href=\"([^\"]+)\"')\n\n\ndef bing_search(query, lang='en-US'):\n base = 'https://www.bing.com/search?mkt=%s&q=' % lang\n bytes = requests.get(base + query).text\n m = r_bing.search(bytes)\n if m:\n return m.group(1)\n\n\nr_duck = re.compile(r'nofollow\" class=\"[^\"]+\" href=\"(?!(?:https?:\\/\\/r\\.search\\.yahoo)|(?:https?:\\/\\/duckduckgo\\.com\\/y\\.js)(?:\\/l\\/\\?kh=-1&uddg=))(.*?)\">')\n\n\ndef duck_search(query):\n query = query.replace('!', '')\n uri = 'https://duckduckgo.com/html/?q=%s&kl=us-en' % query\n bytes = requests.get(uri, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}).text\n if 'web-result' in bytes: # filter out the adds on top of the page\n bytes = bytes.split('web-result')[1]\n m = r_duck.search(bytes)\n if m:\n unquoted_m = unquote(m.group(1))\n return web.decode(unquoted_m)\n\n\n# Alias google_search to duck_search\ngoogle_search = duck_search\n\n\ndef duck_api(query):\n if '!bang' in query.lower():\n return 'https://duckduckgo.com/bang.html'\n\n # This fixes issue #885 (https://github.com/sopel-irc/sopel/issues/885)\n # It seems that duckduckgo api redirects to its Instant answer API html page\n # if the query constains special charactares that aren't urlencoded.\n # So in order to always get a JSON response back the query is urlencoded\n query = quote_plus(query)\n uri = 'https://api.duckduckgo.com/?q=%s&format=json&no_html=1&no_redirect=1' % query\n try:\n results = requests.get(uri).json()\n except ValueError:\n return None\n if results['Redirect']:\n return results['Redirect']\n else:\n return None\n\n\n@commands('duck', 'ddg', 'g')\n# test for bad Unicode handling in py2\n@example('.duck grandorder.wiki chulainn alter', 'https://grandorder.wiki/C\u00fa_Chulainn_(Alter)')\n# the last example is what .help displays\n@example('.duck sopel irc bot', r'https?:\\/\\/sopel\\.chat\\/?', re=True)\ndef duck(bot, trigger):\n \"\"\"Queries Duck Duck Go for the specified input.\"\"\"\n query = trigger.group(2)\n if not query:\n return bot.reply('.ddg what?')\n\n # If the API gives us something, say it and stop\n result = duck_api(query)\n if result:\n bot.reply(result)\n return\n\n # Otherwise, look it up on the HTMl version\n uri = duck_search(query)\n\n if uri:\n bot.reply(uri)\n if 'last_seen_url' in bot.memory:\n bot.memory['last_seen_url'][trigger.sender] = uri\n else:\n bot.reply(\"No results found for '%s'.\" % query)\n\n\n@commands('bing')\n@example('.bing sopel irc bot', r'https?:\\/\\/sopel\\.chat\\/?', re=True)\ndef bing(bot, trigger):\n \"\"\"Queries Bing for the specified input.\"\"\"\n if not trigger.group(2):\n return bot.reply('.bing what?')\n query = trigger.group(2)\n result = 
bing_search(query)\n if result:\n bot.say(result)\n else:\n bot.reply(\"No results found for '%s'.\" % query)\n\n\n@commands('search')\n@example('.search sopel irc bot', r'(https?:\\/\\/sopel\\.chat\\/? \\(b, d\\)|https?:\\/\\/sopel\\.chat\\/? \\(b\\), https?:\\/\\/sopel\\.chat\\/? \\(d\\))', re=True)\ndef search(bot, trigger):\n \"\"\"Searches Bing and Duck Duck Go.\"\"\"\n if not trigger.group(2):\n return bot.reply('.search for what?')\n query = trigger.group(2)\n bu = bing_search(query) or '-'\n du = duck_search(query) or '-'\n\n if bu == du:\n result = '%s (b, d)' % bu\n else:\n if len(bu) > 150:\n bu = '(extremely long link)'\n if len(du) > 150:\n du = '(extremely long link)'\n result = '%s (b), %s (d)' % (bu, du)\n\n bot.reply(result)\n\n\n@commands('suggest')\n@example('.suggest wikip', 'wikipedia')\n@example('.suggest ', 'No query term.')\n@example('.suggest lkashdfiauwgeaef', 'Sorry, no result.')\ndef suggest(bot, trigger):\n \"\"\"Suggest terms starting with given input\"\"\"\n if not trigger.group(2):\n return bot.reply(\"No query term.\")\n query = trigger.group(2)\n # Using Google isn't necessarily ideal, but at most they'll be able to build\n # a composite profile of all users on a given instance, not a profile of any\n # single user. This can be switched out as soon as someone finds (or builds)\n # an alternative suggestion API.\n uri = 'https://suggestqueries.google.com/complete/search?output=toolbar&hl=en&q='\n answer = xmltodict.parse(requests.get(uri + query.replace('+', '%2B')).text)['toplevel']\n try:\n answer = answer['CompleteSuggestion'][0]['suggestion']['@data']\n except TypeError:\n answer = None\n if answer:\n bot.say(answer)\n else:\n bot.reply('Sorry, no result.')\n", "path": "sopel/modules/search.py"}]} | 2,360 | 239 |
gh_patches_debug_24452 | rasdani/github-patches | git_diff | scrapy__scrapy-4532 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fail or warn if from_crawler() returns None
## Summary
Generate a warning or error if from_crawler() for a middleware/extension/etc. returns None
## Motivation
I created a custom extension and connected signals in the from_crawler() classmethod, but neglected to return the new extension instance. Scrapy still reported the extension under "Enabled extensions", but none of the signals worked, since the instance was immediately garbage collected and its signals were silently disconnected.
This was of course an error on my part, but it would have saved me a lot of debugging if I had gotten a warning that from_crawler() was returning None, or if the extension were removed from the "Enabled extensions" list.
Would it be appropriate for utils.misc.create_instance() to raise an error or generate a warning if it's about to return None? Or should MiddlewareManager treat create_instance() returning None the same as create_instance() raising NotConfigured?
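
To make the failure mode concrete, a minimal sketch of the pattern described above (illustrative; the extension and signal names are placeholders, not taken from the report):

```python
from scrapy import signals

class MyExtension:
    @classmethod
    def from_crawler(cls, crawler):
        ext = cls()
        crawler.signals.connect(ext.spider_opened, signal=signals.spider_opened)
        return ext  # <- omitting this return is the mistake: Scrapy stores None,
                    #    the instance is garbage-collected, and its signal
                    #    handlers are silently dropped.

    def spider_opened(self, spider):
        spider.logger.info("extension active")
```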
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/utils/misc.py`
Content:
```
1 """Helper functions which don't fit anywhere else"""
2 import ast
3 import inspect
4 import os
5 import re
6 import hashlib
7 import warnings
8 from contextlib import contextmanager
9 from importlib import import_module
10 from pkgutil import iter_modules
11 from textwrap import dedent
12
13 from w3lib.html import replace_entities
14
15 from scrapy.utils.datatypes import LocalWeakReferencedCache
16 from scrapy.utils.python import flatten, to_unicode
17 from scrapy.item import BaseItem
18
19
20 _ITERABLE_SINGLE_VALUES = dict, BaseItem, str, bytes
21
22
23 def arg_to_iter(arg):
24 """Convert an argument to an iterable. The argument can be a None, single
25 value, or an iterable.
26
27 Exception: if arg is a dict, [arg] will be returned
28 """
29 if arg is None:
30 return []
31 elif not isinstance(arg, _ITERABLE_SINGLE_VALUES) and hasattr(arg, '__iter__'):
32 return arg
33 else:
34 return [arg]
35
36
37 def load_object(path):
38 """Load an object given its absolute object path, and return it.
39
40 object can be the import path of a class, function, variable or an
41 instance, e.g. 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware'
42 """
43
44 try:
45 dot = path.rindex('.')
46 except ValueError:
47 raise ValueError("Error loading object '%s': not a full path" % path)
48
49 module, name = path[:dot], path[dot + 1:]
50 mod = import_module(module)
51
52 try:
53 obj = getattr(mod, name)
54 except AttributeError:
55 raise NameError("Module '%s' doesn't define any object named '%s'" % (module, name))
56
57 return obj
58
59
60 def walk_modules(path):
61 """Loads a module and all its submodules from the given module path and
62 returns them. If *any* module throws an exception while importing, that
63 exception is thrown back.
64
65 For example: walk_modules('scrapy.utils')
66 """
67
68 mods = []
69 mod = import_module(path)
70 mods.append(mod)
71 if hasattr(mod, '__path__'):
72 for _, subpath, ispkg in iter_modules(mod.__path__):
73 fullpath = path + '.' + subpath
74 if ispkg:
75 mods += walk_modules(fullpath)
76 else:
77 submod = import_module(fullpath)
78 mods.append(submod)
79 return mods
80
81
82 def extract_regex(regex, text, encoding='utf-8'):
83 """Extract a list of unicode strings from the given text/encoding using the following policies:
84
85 * if the regex contains a named group called "extract" that will be returned
86 * if the regex contains multiple numbered groups, all those will be returned (flattened)
87 * if the regex doesn't contain any group the entire regex matching is returned
88 """
89
90 if isinstance(regex, str):
91 regex = re.compile(regex, re.UNICODE)
92
93 try:
94 strings = [regex.search(text).group('extract')] # named group
95 except Exception:
96 strings = regex.findall(text) # full regex or numbered groups
97 strings = flatten(strings)
98
99 if isinstance(text, str):
100 return [replace_entities(s, keep=['lt', 'amp']) for s in strings]
101 else:
102 return [replace_entities(to_unicode(s, encoding), keep=['lt', 'amp'])
103 for s in strings]
104
105
106 def md5sum(file):
107 """Calculate the md5 checksum of a file-like object without reading its
108 whole content in memory.
109
110 >>> from io import BytesIO
111 >>> md5sum(BytesIO(b'file content to hash'))
112 '784406af91dd5a54fbb9c84c2236595a'
113 """
114 m = hashlib.md5()
115 while True:
116 d = file.read(8096)
117 if not d:
118 break
119 m.update(d)
120 return m.hexdigest()
121
122
123 def rel_has_nofollow(rel):
124 """Return True if link rel attribute has nofollow type"""
125 return rel is not None and 'nofollow' in rel.split()
126
127
128 def create_instance(objcls, settings, crawler, *args, **kwargs):
129 """Construct a class instance using its ``from_crawler`` or
130 ``from_settings`` constructors, if available.
131
132 At least one of ``settings`` and ``crawler`` needs to be different from
133 ``None``. If ``settings `` is ``None``, ``crawler.settings`` will be used.
134 If ``crawler`` is ``None``, only the ``from_settings`` constructor will be
135 tried.
136
137 ``*args`` and ``**kwargs`` are forwarded to the constructors.
138
139 Raises ``ValueError`` if both ``settings`` and ``crawler`` are ``None``.
140 """
141 if settings is None:
142 if crawler is None:
143 raise ValueError("Specify at least one of settings and crawler.")
144 settings = crawler.settings
145 if crawler and hasattr(objcls, 'from_crawler'):
146 return objcls.from_crawler(crawler, *args, **kwargs)
147 elif hasattr(objcls, 'from_settings'):
148 return objcls.from_settings(settings, *args, **kwargs)
149 else:
150 return objcls(*args, **kwargs)
151
152
153 @contextmanager
154 def set_environ(**kwargs):
155 """Temporarily set environment variables inside the context manager and
156 fully restore previous environment afterwards
157 """
158
159 original_env = {k: os.environ.get(k) for k in kwargs}
160 os.environ.update(kwargs)
161 try:
162 yield
163 finally:
164 for k, v in original_env.items():
165 if v is None:
166 del os.environ[k]
167 else:
168 os.environ[k] = v
169
170
171 _generator_callbacks_cache = LocalWeakReferencedCache(limit=128)
172
173
174 def is_generator_with_return_value(callable):
175 """
176 Returns True if a callable is a generator function which includes a
177 'return' statement with a value different than None, False otherwise
178 """
179 if callable in _generator_callbacks_cache:
180 return _generator_callbacks_cache[callable]
181
182 def returns_none(return_node):
183 value = return_node.value
184 return value is None or isinstance(value, ast.NameConstant) and value.value is None
185
186 if inspect.isgeneratorfunction(callable):
187 tree = ast.parse(dedent(inspect.getsource(callable)))
188 for node in ast.walk(tree):
189 if isinstance(node, ast.Return) and not returns_none(node):
190 _generator_callbacks_cache[callable] = True
191 return _generator_callbacks_cache[callable]
192
193 _generator_callbacks_cache[callable] = False
194 return _generator_callbacks_cache[callable]
195
196
197 def warn_on_generator_with_return_value(spider, callable):
198 """
199 Logs a warning if a callable is a generator function and includes
200 a 'return' statement with a value different than None
201 """
202 if is_generator_with_return_value(callable):
203 warnings.warn(
204 'The "{}.{}" method is a generator and includes a "return" statement with a '
205 'value different than None. This could lead to unexpected behaviour. Please see '
206 'https://docs.python.org/3/reference/simple_stmts.html#the-return-statement '
207 'for details about the semantics of the "return" statement within generators'
208 .format(spider.__class__.__name__, callable.__name__), stacklevel=2,
209 )
210
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/utils/misc.py b/scrapy/utils/misc.py
--- a/scrapy/utils/misc.py
+++ b/scrapy/utils/misc.py
@@ -137,17 +137,26 @@
``*args`` and ``**kwargs`` are forwarded to the constructors.
Raises ``ValueError`` if both ``settings`` and ``crawler`` are ``None``.
+
+ Raises ``TypeError`` if the resulting instance is ``None`` (e.g. if an
+ extension has not been implemented correctly).
"""
if settings is None:
if crawler is None:
raise ValueError("Specify at least one of settings and crawler.")
settings = crawler.settings
if crawler and hasattr(objcls, 'from_crawler'):
- return objcls.from_crawler(crawler, *args, **kwargs)
+ instance = objcls.from_crawler(crawler, *args, **kwargs)
+ method_name = 'from_crawler'
elif hasattr(objcls, 'from_settings'):
- return objcls.from_settings(settings, *args, **kwargs)
+ instance = objcls.from_settings(settings, *args, **kwargs)
+ method_name = 'from_settings'
else:
- return objcls(*args, **kwargs)
+ instance = objcls(*args, **kwargs)
+ method_name = '__new__'
+ if instance is None:
+ raise TypeError("%s.%s returned None" % (objcls.__qualname__, method_name))
+ return instance
@contextmanager
| {"golden_diff": "diff --git a/scrapy/utils/misc.py b/scrapy/utils/misc.py\n--- a/scrapy/utils/misc.py\n+++ b/scrapy/utils/misc.py\n@@ -137,17 +137,26 @@\n ``*args`` and ``**kwargs`` are forwarded to the constructors.\n \n Raises ``ValueError`` if both ``settings`` and ``crawler`` are ``None``.\n+\n+ Raises ``TypeError`` if the resulting instance is ``None`` (e.g. if an\n+ extension has not been implemented correctly).\n \"\"\"\n if settings is None:\n if crawler is None:\n raise ValueError(\"Specify at least one of settings and crawler.\")\n settings = crawler.settings\n if crawler and hasattr(objcls, 'from_crawler'):\n- return objcls.from_crawler(crawler, *args, **kwargs)\n+ instance = objcls.from_crawler(crawler, *args, **kwargs)\n+ method_name = 'from_crawler'\n elif hasattr(objcls, 'from_settings'):\n- return objcls.from_settings(settings, *args, **kwargs)\n+ instance = objcls.from_settings(settings, *args, **kwargs)\n+ method_name = 'from_settings'\n else:\n- return objcls(*args, **kwargs)\n+ instance = objcls(*args, **kwargs)\n+ method_name = '__new__'\n+ if instance is None:\n+ raise TypeError(\"%s.%s returned None\" % (objcls.__qualname__, method_name))\n+ return instance\n \n \n @contextmanager\n", "issue": "Fail or warn if from_crawler() returns None\n## Summary\r\n\r\nGenerate a warning or error if from_crawler() for a middleware/extension/etc. returns None\r\n\r\n## Motivation\r\n\r\nI created a custom extension and connected signals in the from_crawler() classmethod, but neglected to return the new extension instance. Scrapy still reported the extension under \"Enabled extensions\", but none of the signals worked, since the instance was immediately garbage collected and its signals were silently disconnected.\r\n\r\nThis was of course an error on my part, but it would have saved me a lot of debugging if I had gotten a warning that from_crawler() was returning None, or if the extension were removed from the \"Enabled extensions\" list.\r\n\r\nWould it be appropriate for utils.misc.create_instance() to raise an error or generate a warning if it's about to return None? Or should MiddlewareManager treat create_instance() returning None the same as create_instance() raising NotConfigured?\n", "before_files": [{"content": "\"\"\"Helper functions which don't fit anywhere else\"\"\"\nimport ast\nimport inspect\nimport os\nimport re\nimport hashlib\nimport warnings\nfrom contextlib import contextmanager\nfrom importlib import import_module\nfrom pkgutil import iter_modules\nfrom textwrap import dedent\n\nfrom w3lib.html import replace_entities\n\nfrom scrapy.utils.datatypes import LocalWeakReferencedCache\nfrom scrapy.utils.python import flatten, to_unicode\nfrom scrapy.item import BaseItem\n\n\n_ITERABLE_SINGLE_VALUES = dict, BaseItem, str, bytes\n\n\ndef arg_to_iter(arg):\n \"\"\"Convert an argument to an iterable. The argument can be a None, single\n value, or an iterable.\n\n Exception: if arg is a dict, [arg] will be returned\n \"\"\"\n if arg is None:\n return []\n elif not isinstance(arg, _ITERABLE_SINGLE_VALUES) and hasattr(arg, '__iter__'):\n return arg\n else:\n return [arg]\n\n\ndef load_object(path):\n \"\"\"Load an object given its absolute object path, and return it.\n\n object can be the import path of a class, function, variable or an\n instance, e.g. 
'scrapy.downloadermiddlewares.redirect.RedirectMiddleware'\n \"\"\"\n\n try:\n dot = path.rindex('.')\n except ValueError:\n raise ValueError(\"Error loading object '%s': not a full path\" % path)\n\n module, name = path[:dot], path[dot + 1:]\n mod = import_module(module)\n\n try:\n obj = getattr(mod, name)\n except AttributeError:\n raise NameError(\"Module '%s' doesn't define any object named '%s'\" % (module, name))\n\n return obj\n\n\ndef walk_modules(path):\n \"\"\"Loads a module and all its submodules from the given module path and\n returns them. If *any* module throws an exception while importing, that\n exception is thrown back.\n\n For example: walk_modules('scrapy.utils')\n \"\"\"\n\n mods = []\n mod = import_module(path)\n mods.append(mod)\n if hasattr(mod, '__path__'):\n for _, subpath, ispkg in iter_modules(mod.__path__):\n fullpath = path + '.' + subpath\n if ispkg:\n mods += walk_modules(fullpath)\n else:\n submod = import_module(fullpath)\n mods.append(submod)\n return mods\n\n\ndef extract_regex(regex, text, encoding='utf-8'):\n \"\"\"Extract a list of unicode strings from the given text/encoding using the following policies:\n\n * if the regex contains a named group called \"extract\" that will be returned\n * if the regex contains multiple numbered groups, all those will be returned (flattened)\n * if the regex doesn't contain any group the entire regex matching is returned\n \"\"\"\n\n if isinstance(regex, str):\n regex = re.compile(regex, re.UNICODE)\n\n try:\n strings = [regex.search(text).group('extract')] # named group\n except Exception:\n strings = regex.findall(text) # full regex or numbered groups\n strings = flatten(strings)\n\n if isinstance(text, str):\n return [replace_entities(s, keep=['lt', 'amp']) for s in strings]\n else:\n return [replace_entities(to_unicode(s, encoding), keep=['lt', 'amp'])\n for s in strings]\n\n\ndef md5sum(file):\n \"\"\"Calculate the md5 checksum of a file-like object without reading its\n whole content in memory.\n\n >>> from io import BytesIO\n >>> md5sum(BytesIO(b'file content to hash'))\n '784406af91dd5a54fbb9c84c2236595a'\n \"\"\"\n m = hashlib.md5()\n while True:\n d = file.read(8096)\n if not d:\n break\n m.update(d)\n return m.hexdigest()\n\n\ndef rel_has_nofollow(rel):\n \"\"\"Return True if link rel attribute has nofollow type\"\"\"\n return rel is not None and 'nofollow' in rel.split()\n\n\ndef create_instance(objcls, settings, crawler, *args, **kwargs):\n \"\"\"Construct a class instance using its ``from_crawler`` or\n ``from_settings`` constructors, if available.\n\n At least one of ``settings`` and ``crawler`` needs to be different from\n ``None``. 
If ``settings `` is ``None``, ``crawler.settings`` will be used.\n If ``crawler`` is ``None``, only the ``from_settings`` constructor will be\n tried.\n\n ``*args`` and ``**kwargs`` are forwarded to the constructors.\n\n Raises ``ValueError`` if both ``settings`` and ``crawler`` are ``None``.\n \"\"\"\n if settings is None:\n if crawler is None:\n raise ValueError(\"Specify at least one of settings and crawler.\")\n settings = crawler.settings\n if crawler and hasattr(objcls, 'from_crawler'):\n return objcls.from_crawler(crawler, *args, **kwargs)\n elif hasattr(objcls, 'from_settings'):\n return objcls.from_settings(settings, *args, **kwargs)\n else:\n return objcls(*args, **kwargs)\n\n\n@contextmanager\ndef set_environ(**kwargs):\n \"\"\"Temporarily set environment variables inside the context manager and\n fully restore previous environment afterwards\n \"\"\"\n\n original_env = {k: os.environ.get(k) for k in kwargs}\n os.environ.update(kwargs)\n try:\n yield\n finally:\n for k, v in original_env.items():\n if v is None:\n del os.environ[k]\n else:\n os.environ[k] = v\n\n\n_generator_callbacks_cache = LocalWeakReferencedCache(limit=128)\n\n\ndef is_generator_with_return_value(callable):\n \"\"\"\n Returns True if a callable is a generator function which includes a\n 'return' statement with a value different than None, False otherwise\n \"\"\"\n if callable in _generator_callbacks_cache:\n return _generator_callbacks_cache[callable]\n\n def returns_none(return_node):\n value = return_node.value\n return value is None or isinstance(value, ast.NameConstant) and value.value is None\n\n if inspect.isgeneratorfunction(callable):\n tree = ast.parse(dedent(inspect.getsource(callable)))\n for node in ast.walk(tree):\n if isinstance(node, ast.Return) and not returns_none(node):\n _generator_callbacks_cache[callable] = True\n return _generator_callbacks_cache[callable]\n\n _generator_callbacks_cache[callable] = False\n return _generator_callbacks_cache[callable]\n\n\ndef warn_on_generator_with_return_value(spider, callable):\n \"\"\"\n Logs a warning if a callable is a generator function and includes\n a 'return' statement with a value different than None\n \"\"\"\n if is_generator_with_return_value(callable):\n warnings.warn(\n 'The \"{}.{}\" method is a generator and includes a \"return\" statement with a '\n 'value different than None. This could lead to unexpected behaviour. Please see '\n 'https://docs.python.org/3/reference/simple_stmts.html#the-return-statement '\n 'for details about the semantics of the \"return\" statement within generators'\n .format(spider.__class__.__name__, callable.__name__), stacklevel=2,\n )\n", "path": "scrapy/utils/misc.py"}], "after_files": [{"content": "\"\"\"Helper functions which don't fit anywhere else\"\"\"\nimport ast\nimport inspect\nimport os\nimport re\nimport hashlib\nimport warnings\nfrom contextlib import contextmanager\nfrom importlib import import_module\nfrom pkgutil import iter_modules\nfrom textwrap import dedent\n\nfrom w3lib.html import replace_entities\n\nfrom scrapy.utils.datatypes import LocalWeakReferencedCache\nfrom scrapy.utils.python import flatten, to_unicode\nfrom scrapy.item import BaseItem\n\n\n_ITERABLE_SINGLE_VALUES = dict, BaseItem, str, bytes\n\n\ndef arg_to_iter(arg):\n \"\"\"Convert an argument to an iterable. 
The argument can be a None, single\n value, or an iterable.\n\n Exception: if arg is a dict, [arg] will be returned\n \"\"\"\n if arg is None:\n return []\n elif not isinstance(arg, _ITERABLE_SINGLE_VALUES) and hasattr(arg, '__iter__'):\n return arg\n else:\n return [arg]\n\n\ndef load_object(path):\n \"\"\"Load an object given its absolute object path, and return it.\n\n object can be the import path of a class, function, variable or an\n instance, e.g. 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware'\n \"\"\"\n\n try:\n dot = path.rindex('.')\n except ValueError:\n raise ValueError(\"Error loading object '%s': not a full path\" % path)\n\n module, name = path[:dot], path[dot + 1:]\n mod = import_module(module)\n\n try:\n obj = getattr(mod, name)\n except AttributeError:\n raise NameError(\"Module '%s' doesn't define any object named '%s'\" % (module, name))\n\n return obj\n\n\ndef walk_modules(path):\n \"\"\"Loads a module and all its submodules from the given module path and\n returns them. If *any* module throws an exception while importing, that\n exception is thrown back.\n\n For example: walk_modules('scrapy.utils')\n \"\"\"\n\n mods = []\n mod = import_module(path)\n mods.append(mod)\n if hasattr(mod, '__path__'):\n for _, subpath, ispkg in iter_modules(mod.__path__):\n fullpath = path + '.' + subpath\n if ispkg:\n mods += walk_modules(fullpath)\n else:\n submod = import_module(fullpath)\n mods.append(submod)\n return mods\n\n\ndef extract_regex(regex, text, encoding='utf-8'):\n \"\"\"Extract a list of unicode strings from the given text/encoding using the following policies:\n\n * if the regex contains a named group called \"extract\" that will be returned\n * if the regex contains multiple numbered groups, all those will be returned (flattened)\n * if the regex doesn't contain any group the entire regex matching is returned\n \"\"\"\n\n if isinstance(regex, str):\n regex = re.compile(regex, re.UNICODE)\n\n try:\n strings = [regex.search(text).group('extract')] # named group\n except Exception:\n strings = regex.findall(text) # full regex or numbered groups\n strings = flatten(strings)\n\n if isinstance(text, str):\n return [replace_entities(s, keep=['lt', 'amp']) for s in strings]\n else:\n return [replace_entities(to_unicode(s, encoding), keep=['lt', 'amp'])\n for s in strings]\n\n\ndef md5sum(file):\n \"\"\"Calculate the md5 checksum of a file-like object without reading its\n whole content in memory.\n\n >>> from io import BytesIO\n >>> md5sum(BytesIO(b'file content to hash'))\n '784406af91dd5a54fbb9c84c2236595a'\n \"\"\"\n m = hashlib.md5()\n while True:\n d = file.read(8096)\n if not d:\n break\n m.update(d)\n return m.hexdigest()\n\n\ndef rel_has_nofollow(rel):\n \"\"\"Return True if link rel attribute has nofollow type\"\"\"\n return rel is not None and 'nofollow' in rel.split()\n\n\ndef create_instance(objcls, settings, crawler, *args, **kwargs):\n \"\"\"Construct a class instance using its ``from_crawler`` or\n ``from_settings`` constructors, if available.\n\n At least one of ``settings`` and ``crawler`` needs to be different from\n ``None``. If ``settings `` is ``None``, ``crawler.settings`` will be used.\n If ``crawler`` is ``None``, only the ``from_settings`` constructor will be\n tried.\n\n ``*args`` and ``**kwargs`` are forwarded to the constructors.\n\n Raises ``ValueError`` if both ``settings`` and ``crawler`` are ``None``.\n\n Raises ``TypeError`` if the resulting instance is ``None`` (e.g. 
if an\n extension has not been implemented correctly).\n \"\"\"\n if settings is None:\n if crawler is None:\n raise ValueError(\"Specify at least one of settings and crawler.\")\n settings = crawler.settings\n if crawler and hasattr(objcls, 'from_crawler'):\n instance = objcls.from_crawler(crawler, *args, **kwargs)\n method_name = 'from_crawler'\n elif hasattr(objcls, 'from_settings'):\n instance = objcls.from_settings(settings, *args, **kwargs)\n method_name = 'from_settings'\n else:\n instance = objcls(*args, **kwargs)\n method_name = '__new__'\n if instance is None:\n raise TypeError(\"%s.%s returned None\" % (objcls.__qualname__, method_name))\n return instance\n\n\n@contextmanager\ndef set_environ(**kwargs):\n \"\"\"Temporarily set environment variables inside the context manager and\n fully restore previous environment afterwards\n \"\"\"\n\n original_env = {k: os.environ.get(k) for k in kwargs}\n os.environ.update(kwargs)\n try:\n yield\n finally:\n for k, v in original_env.items():\n if v is None:\n del os.environ[k]\n else:\n os.environ[k] = v\n\n\n_generator_callbacks_cache = LocalWeakReferencedCache(limit=128)\n\n\ndef is_generator_with_return_value(callable):\n \"\"\"\n Returns True if a callable is a generator function which includes a\n 'return' statement with a value different than None, False otherwise\n \"\"\"\n if callable in _generator_callbacks_cache:\n return _generator_callbacks_cache[callable]\n\n def returns_none(return_node):\n value = return_node.value\n return value is None or isinstance(value, ast.NameConstant) and value.value is None\n\n if inspect.isgeneratorfunction(callable):\n tree = ast.parse(dedent(inspect.getsource(callable)))\n for node in ast.walk(tree):\n if isinstance(node, ast.Return) and not returns_none(node):\n _generator_callbacks_cache[callable] = True\n return _generator_callbacks_cache[callable]\n\n _generator_callbacks_cache[callable] = False\n return _generator_callbacks_cache[callable]\n\n\ndef warn_on_generator_with_return_value(spider, callable):\n \"\"\"\n Logs a warning if a callable is a generator function and includes\n a 'return' statement with a value different than None\n \"\"\"\n if is_generator_with_return_value(callable):\n warnings.warn(\n 'The \"{}.{}\" method is a generator and includes a \"return\" statement with a '\n 'value different than None. This could lead to unexpected behaviour. Please see '\n 'https://docs.python.org/3/reference/simple_stmts.html#the-return-statement '\n 'for details about the semantics of the \"return\" statement within generators'\n .format(spider.__class__.__name__, callable.__name__), stacklevel=2,\n )\n", "path": "scrapy/utils/misc.py"}]} | 2,565 | 331 |
gh_patches_debug_25169 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-617 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement adding a new empty table
## Problem
<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->
Users may want to create an empty table.
## Proposed solution
<!-- A clear and concise description of your proposed solution or feature. -->
The ["Basic Table Operations" design spec](https://wiki.mathesar.org/design/specs/table-operations) has a solution for this, which we need to implement on the frontend. We will be implementing a different design than the one in the spec, which will need to be improvised.
This issue involves the following portions of the spec:
- _User Experience_:
- User adds a new table
- User edits a new table name
- User adds an empty table
- _Interactions_:
- Sequential Table Names
Please note that we'll need to generate and display a default name of the table that the user can use. Following the logic in #449 is recommended.
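As a rough illustration of the "Sequential Table Names" requirement, the default-name logic could look like the sketch below (the helper and its arguments are hypothetical, not existing Mathesar code):

```python
def next_default_table_name(existing_names, base="Table"):
    """Return the first sequential default name not already used in the schema."""
    n = 0
    while f"{base} {n}" in existing_names:
        n += 1
    return f"{base} {n}"

# With {"Table 0", "Table 1"} already present, this returns "Table 2".
```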
## Additional context
<!-- Add any other context or screenshots about the feature request here.-->
- Backend work: #184
- Design issue: #185
- #449 is related.
Logs of conversation on Matrix from @pavish:
> For adding an empty table, instead of showing Empty table option within the tab, I think it might be better to show it on the top.
>
> The Add new table button can be changed to a dropdown with two options. 1. Empty table, 2. Import data.
>
> On clicking empty table, it would just send a create table request and create the table. Then the user can rename it if they want.
>
> This will have clean separation of concerns. Because Import data, involves importing from csv, remote file import and then copy/paste from spreadsheet..
>
> Empty table, would just create a new empty table.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/api/viewsets/tables.py`
Content:
```
1 from django_filters import rest_framework as filters
2 from psycopg2.errors import CheckViolation, DuplicateTable, InvalidTextRepresentation
3 from rest_framework import status, viewsets
4 from rest_framework.decorators import action
5 from rest_framework.exceptions import ValidationError, APIException
6 from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
7 from rest_framework.response import Response
8 from sqlalchemy.exc import ProgrammingError, DataError, IntegrityError
9
10 from db.types.alteration import UnsupportedTypeException
11 from mathesar.api.filters import TableFilter
12 from mathesar.api.pagination import DefaultLimitOffsetPagination
13 from mathesar.api.serializers.tables import TableSerializer, TablePreviewSerializer
14 from mathesar.models import Table
15 from mathesar.utils.tables import (
16 get_table_column_types, create_table_from_datafile, create_empty_table,
17 gen_table_name
18 )
19
20
21 class TableViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):
22 serializer_class = TableSerializer
23 pagination_class = DefaultLimitOffsetPagination
24 filter_backends = (filters.DjangoFilterBackend,)
25 filterset_class = TableFilter
26
27 def get_queryset(self):
28 return Table.objects.all().order_by('-created_at')
29
30 def create(self, request):
31 serializer = TableSerializer(data=request.data, context={'request': request})
32 serializer.is_valid(raise_exception=True)
33
34 if not serializer.validated_data['name']:
35 name = gen_table_name(
36 serializer.validated_data['schema'],
37 serializer.validated_data['data_files'],
38 )
39 else:
40 name = serializer.validated_data['name']
41
42 try:
43 if serializer.validated_data['data_files']:
44 table = create_table_from_datafile(
45 serializer.validated_data['data_files'],
46 name,
47 serializer.validated_data['schema'],
48 )
49 else:
50 table = create_empty_table(
51 name,
52 serializer.validated_data['schema']
53 )
54 except ProgrammingError as e:
55 if type(e.orig) == DuplicateTable:
56 raise ValidationError(
57 f"Relation {request.data['name']} already exists in schema {request.data['schema']}"
58 )
59 else:
60 raise APIException(e)
61
62 serializer = TableSerializer(table, context={'request': request})
63 return Response(serializer.data, status=status.HTTP_201_CREATED)
64
65 def partial_update(self, request, pk=None):
66 serializer = TableSerializer(
67 data=request.data, context={'request': request}, partial=True
68 )
69 serializer.is_valid(raise_exception=True)
70 table = self.get_object()
71
72 # Save the fields that are stored in the model.
73 present_model_fields = []
74 for model_field in table.MODEL_FIELDS:
75 if model_field in serializer.validated_data:
76 setattr(table, model_field, serializer.validated_data[model_field])
77 present_model_fields.append(model_field)
78 table.save(update_fields=present_model_fields)
79 for key in present_model_fields:
80 del serializer.validated_data[key]
81
82 # Save the fields that are stored in the underlying DB.
83 try:
84 table.update_sa_table(serializer.validated_data)
85 except ValueError as e:
86 raise ValidationError(e)
87
88 # Reload the table to avoid cached properties
89 table = self.get_object()
90 serializer = TableSerializer(table, context={'request': request})
91 return Response(serializer.data)
92
93 def destroy(self, request, pk=None):
94 table = self.get_object()
95 table.delete_sa_table()
96 table.delete()
97 return Response(status=status.HTTP_204_NO_CONTENT)
98
99 @action(methods=['get'], detail=True)
100 def type_suggestions(self, request, pk=None):
101 table = self.get_object()
102 col_types = get_table_column_types(table)
103 return Response(col_types)
104
105 @action(methods=['post'], detail=True)
106 def previews(self, request, pk=None):
107 table = self.get_object()
108 serializer = TablePreviewSerializer(data=request.data)
109 serializer.is_valid(raise_exception=True)
110 columns = serializer.data["columns"]
111
112 column_names = [col["name"] for col in columns]
113 if not len(column_names) == len(set(column_names)):
114 raise ValidationError("Column names must be distinct")
115 if not len(columns) == len(table.sa_columns):
116 raise ValidationError("Incorrect number of columns in request.")
117
118 table_data = TableSerializer(table, context={"request": request}).data
119 try:
120 preview_records = table.get_preview(columns)
121 except (DataError, IntegrityError) as e:
122 if type(e.orig) == InvalidTextRepresentation or type(e.orig) == CheckViolation:
123 raise ValidationError("Invalid type cast requested.")
124 else:
125 raise APIException
126 except UnsupportedTypeException as e:
127 raise ValidationError(e)
128 except Exception as e:
129 raise APIException(e)
130
131 table_data.update(
132 {
133 # There's no way to reflect actual column data without
134 # creating a view, so we just use the submission, assuming
135 # no errors means we changed to the desired names and types
136 "columns": columns,
137 "records": preview_records
138 }
139 )
140
141 return Response(table_data)
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mathesar/api/viewsets/tables.py b/mathesar/api/viewsets/tables.py
--- a/mathesar/api/viewsets/tables.py
+++ b/mathesar/api/viewsets/tables.py
@@ -31,26 +31,15 @@
serializer = TableSerializer(data=request.data, context={'request': request})
serializer.is_valid(raise_exception=True)
- if not serializer.validated_data['name']:
- name = gen_table_name(
- serializer.validated_data['schema'],
- serializer.validated_data['data_files'],
- )
- else:
- name = serializer.validated_data['name']
+ schema = serializer.validated_data['schema']
+ data_files = serializer.validated_data.get('data_files')
+ name = serializer.validated_data.get('name') or gen_table_name(schema, data_files)
try:
- if serializer.validated_data['data_files']:
- table = create_table_from_datafile(
- serializer.validated_data['data_files'],
- name,
- serializer.validated_data['schema'],
- )
+ if data_files:
+ table = create_table_from_datafile(data_files, name, schema)
else:
- table = create_empty_table(
- name,
- serializer.validated_data['schema']
- )
+ table = create_empty_table(name, schema)
except ProgrammingError as e:
if type(e.orig) == DuplicateTable:
raise ValidationError(
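In effect, the patch flattens the branching in `create()`: `schema` and `data_files` are read once, and `gen_table_name(schema, data_files)` supplies a default when the client omits the name. A rough sketch of the "create empty table" request the issue asks the frontend to send is below; the endpoint path, schema id, and absence of authentication are assumptions for illustration only, not taken from Mathesar's API docs:

```python
import requests

# Hypothetical endpoint and payload: no "name" and no data files,
# so the server falls back to gen_table_name() for a sequential default.
resp = requests.post(
    "http://localhost:8000/api/v0/tables/",
    json={"schema": 1},
)
resp.raise_for_status()
print(resp.json()["name"])  # e.g. "Table 0"
```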
| {"golden_diff": "diff --git a/mathesar/api/viewsets/tables.py b/mathesar/api/viewsets/tables.py\n--- a/mathesar/api/viewsets/tables.py\n+++ b/mathesar/api/viewsets/tables.py\n@@ -31,26 +31,15 @@\n serializer = TableSerializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n \n- if not serializer.validated_data['name']:\n- name = gen_table_name(\n- serializer.validated_data['schema'],\n- serializer.validated_data['data_files'],\n- )\n- else:\n- name = serializer.validated_data['name']\n+ schema = serializer.validated_data['schema']\n+ data_files = serializer.validated_data.get('data_files')\n+ name = serializer.validated_data.get('name') or gen_table_name(schema, data_files)\n \n try:\n- if serializer.validated_data['data_files']:\n- table = create_table_from_datafile(\n- serializer.validated_data['data_files'],\n- name,\n- serializer.validated_data['schema'],\n- )\n+ if data_files:\n+ table = create_table_from_datafile(data_files, name, schema)\n else:\n- table = create_empty_table(\n- name,\n- serializer.validated_data['schema']\n- )\n+ table = create_empty_table(name, schema)\n except ProgrammingError as e:\n if type(e.orig) == DuplicateTable:\n raise ValidationError(\n", "issue": "Implement adding a new empty table\n## Problem\r\n<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->\r\nUsers may want to create an empty table.\r\n\r\n## Proposed solution\r\n<!-- A clear and concise description of your proposed solution or feature. -->\r\nThe [\"Basic Table Operations\" design spec](https://wiki.mathesar.org/design/specs/table-operations) has a solution for this, which we need to implement on the frontend. We will be implementing a different design than the one in the spec, which will need to be improvised.\r\n\r\nThis issue involves the following portions of the spec:\r\n- _User Experience_: \r\n - User adds a new table\r\n - User edits a new table name\r\n - User adds an empty table\r\n- _Interactions_: \r\n - Sequential Table Names\r\n \r\nPlease note that we'll need to generate and display a default name of the table that the user can use. Following the logic in #449 is recommended.\r\n\r\n## Additional context\r\n<!-- Add any other context or screenshots about the feature request here.-->\r\n- Backend work: #184 \r\n- Design issue: #185\r\n- #449 is related.\r\n\r\nLogs of conversation on Matrix from @pavish:\r\n\r\n> For adding an empty table, instead of showing Empty table option within the tab, I think it might be better to show it on the top.\r\n> \r\n> The Add new table button can be changed to a dropdown with two options. 1. Empty table, 2. Import data.\r\n> \r\n> On clicking empty table, it would just send a create table request and create the table. Then the user can rename it if they want.\r\n> \r\n> This will have clean separation of concerns. 
Because Import data, involves importing from csv, remote file import and then copy/paste from spreadsheet..\r\n> \r\n> Empty table, would just create a new empty table.\n", "before_files": [{"content": "from django_filters import rest_framework as filters\nfrom psycopg2.errors import CheckViolation, DuplicateTable, InvalidTextRepresentation\nfrom rest_framework import status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import ValidationError, APIException\nfrom rest_framework.mixins import ListModelMixin, RetrieveModelMixin\nfrom rest_framework.response import Response\nfrom sqlalchemy.exc import ProgrammingError, DataError, IntegrityError\n\nfrom db.types.alteration import UnsupportedTypeException\nfrom mathesar.api.filters import TableFilter\nfrom mathesar.api.pagination import DefaultLimitOffsetPagination\nfrom mathesar.api.serializers.tables import TableSerializer, TablePreviewSerializer\nfrom mathesar.models import Table\nfrom mathesar.utils.tables import (\n get_table_column_types, create_table_from_datafile, create_empty_table,\n gen_table_name\n)\n\n\nclass TableViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):\n serializer_class = TableSerializer\n pagination_class = DefaultLimitOffsetPagination\n filter_backends = (filters.DjangoFilterBackend,)\n filterset_class = TableFilter\n\n def get_queryset(self):\n return Table.objects.all().order_by('-created_at')\n\n def create(self, request):\n serializer = TableSerializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n\n if not serializer.validated_data['name']:\n name = gen_table_name(\n serializer.validated_data['schema'],\n serializer.validated_data['data_files'],\n )\n else:\n name = serializer.validated_data['name']\n\n try:\n if serializer.validated_data['data_files']:\n table = create_table_from_datafile(\n serializer.validated_data['data_files'],\n name,\n serializer.validated_data['schema'],\n )\n else:\n table = create_empty_table(\n name,\n serializer.validated_data['schema']\n )\n except ProgrammingError as e:\n if type(e.orig) == DuplicateTable:\n raise ValidationError(\n f\"Relation {request.data['name']} already exists in schema {request.data['schema']}\"\n )\n else:\n raise APIException(e)\n\n serializer = TableSerializer(table, context={'request': request})\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n def partial_update(self, request, pk=None):\n serializer = TableSerializer(\n data=request.data, context={'request': request}, partial=True\n )\n serializer.is_valid(raise_exception=True)\n table = self.get_object()\n\n # Save the fields that are stored in the model.\n present_model_fields = []\n for model_field in table.MODEL_FIELDS:\n if model_field in serializer.validated_data:\n setattr(table, model_field, serializer.validated_data[model_field])\n present_model_fields.append(model_field)\n table.save(update_fields=present_model_fields)\n for key in present_model_fields:\n del serializer.validated_data[key]\n\n # Save the fields that are stored in the underlying DB.\n try:\n table.update_sa_table(serializer.validated_data)\n except ValueError as e:\n raise ValidationError(e)\n\n # Reload the table to avoid cached properties\n table = self.get_object()\n serializer = TableSerializer(table, context={'request': request})\n return Response(serializer.data)\n\n def destroy(self, request, pk=None):\n table = self.get_object()\n table.delete_sa_table()\n table.delete()\n return 
Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(methods=['get'], detail=True)\n def type_suggestions(self, request, pk=None):\n table = self.get_object()\n col_types = get_table_column_types(table)\n return Response(col_types)\n\n @action(methods=['post'], detail=True)\n def previews(self, request, pk=None):\n table = self.get_object()\n serializer = TablePreviewSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n columns = serializer.data[\"columns\"]\n\n column_names = [col[\"name\"] for col in columns]\n if not len(column_names) == len(set(column_names)):\n raise ValidationError(\"Column names must be distinct\")\n if not len(columns) == len(table.sa_columns):\n raise ValidationError(\"Incorrect number of columns in request.\")\n\n table_data = TableSerializer(table, context={\"request\": request}).data\n try:\n preview_records = table.get_preview(columns)\n except (DataError, IntegrityError) as e:\n if type(e.orig) == InvalidTextRepresentation or type(e.orig) == CheckViolation:\n raise ValidationError(\"Invalid type cast requested.\")\n else:\n raise APIException\n except UnsupportedTypeException as e:\n raise ValidationError(e)\n except Exception as e:\n raise APIException(e)\n\n table_data.update(\n {\n # There's no way to reflect actual column data without\n # creating a view, so we just use the submission, assuming\n # no errors means we changed to the desired names and types\n \"columns\": columns,\n \"records\": preview_records\n }\n )\n\n return Response(table_data)\n", "path": "mathesar/api/viewsets/tables.py"}], "after_files": [{"content": "from django_filters import rest_framework as filters\nfrom psycopg2.errors import CheckViolation, DuplicateTable, InvalidTextRepresentation\nfrom rest_framework import status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import ValidationError, APIException\nfrom rest_framework.mixins import ListModelMixin, RetrieveModelMixin\nfrom rest_framework.response import Response\nfrom sqlalchemy.exc import ProgrammingError, DataError, IntegrityError\n\nfrom db.types.alteration import UnsupportedTypeException\nfrom mathesar.api.filters import TableFilter\nfrom mathesar.api.pagination import DefaultLimitOffsetPagination\nfrom mathesar.api.serializers.tables import TableSerializer, TablePreviewSerializer\nfrom mathesar.models import Table\nfrom mathesar.utils.tables import (\n get_table_column_types, create_table_from_datafile, create_empty_table,\n gen_table_name\n)\n\n\nclass TableViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):\n serializer_class = TableSerializer\n pagination_class = DefaultLimitOffsetPagination\n filter_backends = (filters.DjangoFilterBackend,)\n filterset_class = TableFilter\n\n def get_queryset(self):\n return Table.objects.all().order_by('-created_at')\n\n def create(self, request):\n serializer = TableSerializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n\n schema = serializer.validated_data['schema']\n data_files = serializer.validated_data.get('data_files')\n name = serializer.validated_data.get('name') or gen_table_name(schema, data_files)\n\n try:\n if data_files:\n table = create_table_from_datafile(data_files, name, schema)\n else:\n table = create_empty_table(name, schema)\n except ProgrammingError as e:\n if type(e.orig) == DuplicateTable:\n raise ValidationError(\n f\"Relation {request.data['name']} already exists in schema {request.data['schema']}\"\n )\n else:\n raise 
APIException(e)\n\n serializer = TableSerializer(table, context={'request': request})\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n def partial_update(self, request, pk=None):\n serializer = TableSerializer(\n data=request.data, context={'request': request}, partial=True\n )\n serializer.is_valid(raise_exception=True)\n table = self.get_object()\n\n # Save the fields that are stored in the model.\n present_model_fields = []\n for model_field in table.MODEL_FIELDS:\n if model_field in serializer.validated_data:\n setattr(table, model_field, serializer.validated_data[model_field])\n present_model_fields.append(model_field)\n table.save(update_fields=present_model_fields)\n for key in present_model_fields:\n del serializer.validated_data[key]\n\n # Save the fields that are stored in the underlying DB.\n try:\n table.update_sa_table(serializer.validated_data)\n except ValueError as e:\n raise ValidationError(e)\n\n # Reload the table to avoid cached properties\n table = self.get_object()\n serializer = TableSerializer(table, context={'request': request})\n return Response(serializer.data)\n\n def destroy(self, request, pk=None):\n table = self.get_object()\n table.delete_sa_table()\n table.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(methods=['get'], detail=True)\n def type_suggestions(self, request, pk=None):\n table = self.get_object()\n col_types = get_table_column_types(table)\n return Response(col_types)\n\n @action(methods=['post'], detail=True)\n def previews(self, request, pk=None):\n table = self.get_object()\n serializer = TablePreviewSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n columns = serializer.data[\"columns\"]\n\n column_names = [col[\"name\"] for col in columns]\n if not len(column_names) == len(set(column_names)):\n raise ValidationError(\"Column names must be distinct\")\n if not len(columns) == len(table.sa_columns):\n raise ValidationError(\"Incorrect number of columns in request.\")\n\n table_data = TableSerializer(table, context={\"request\": request}).data\n try:\n preview_records = table.get_preview(columns)\n except (DataError, IntegrityError) as e:\n if type(e.orig) == InvalidTextRepresentation or type(e.orig) == CheckViolation:\n raise ValidationError(\"Invalid type cast requested.\")\n else:\n raise APIException\n except UnsupportedTypeException as e:\n raise ValidationError(e)\n except Exception as e:\n raise APIException(e)\n\n table_data.update(\n {\n # There's no way to reflect actual column data without\n # creating a view, so we just use the submission, assuming\n # no errors means we changed to the desired names and types\n \"columns\": columns,\n \"records\": preview_records\n }\n )\n\n return Response(table_data)\n", "path": "mathesar/api/viewsets/tables.py"}]} | 2,039 | 317 |
gh_patches_debug_54590 | rasdani/github-patches | git_diff | zulip__zulip-20491 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove "Send a reply" new user tip
After implementing #19900, there are two places where new users are told how to reply to a message: in the Welcome Bot text and in the "Send a reply" new user tip immediately below.
To simplify and avoid redundancy, we should remove the "Send a reply" new user tip.
<img width="909" alt="Screen_Shot_2021-12-06_at_10_08_14_AM" src="https://user-images.githubusercontent.com/2090066/144938995-080268ce-510d-4b76-b3c1-b691fbb814f4.png">
[CZO thread](https://chat.zulip.org/#narrow/stream/101-design/topic/.22click.20to.20reply.22.20whale)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/lib/hotspots.py`
Content:
```
1 # See https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html
2 # for documentation on this subsystem.
3 from typing import Dict, List
4
5 from django.conf import settings
6 from django.utils.functional import Promise
7 from django.utils.translation import gettext_lazy
8
9 from zerver.models import UserHotspot, UserProfile
10
11 INTRO_HOTSPOTS: Dict[str, Dict[str, Promise]] = {
12 "intro_reply": {
13 "title": gettext_lazy("Reply to a message"),
14 "description": gettext_lazy("Click anywhere on a message to reply."),
15 },
16 "intro_streams": {
17 "title": gettext_lazy("Catch up on a stream"),
18 "description": gettext_lazy(
19 "Messages sent to a stream are seen by everyone subscribed "
20 "to that stream. Try clicking on one of the stream links below."
21 ),
22 },
23 "intro_topics": {
24 "title": gettext_lazy("Topics"),
25 "description": gettext_lazy(
26 "Every message has a topic. Topics keep conversations "
27 "easy to follow, and make it easy to reply to conversations that start "
28 "while you are offline."
29 ),
30 },
31 "intro_gear": {
32 "title": gettext_lazy("Settings"),
33 "description": gettext_lazy(
34 "Go to Settings to configure your notifications and display settings."
35 ),
36 },
37 "intro_compose": {
38 "title": gettext_lazy("Compose"),
39 "description": gettext_lazy(
40 "Click here to start a new conversation. Pick a topic "
41 "(2-3 words is best), and give it a go!"
42 ),
43 },
44 }
45
46 # We would most likely implement new hotspots in the future that aren't
47 # a part of the initial tutorial. To that end, classifying them into
48 # categories which are aggregated in ALL_HOTSPOTS, seems like a good start.
49 ALL_HOTSPOTS: Dict[str, Dict[str, Promise]] = {
50 **INTRO_HOTSPOTS,
51 }
52
53
54 def get_next_hotspots(user: UserProfile) -> List[Dict[str, object]]:
55 # For manual testing, it can be convenient to set
56 # ALWAYS_SEND_ALL_HOTSPOTS=True in `zproject/dev_settings.py` to
57 # make it easy to click on all of the hotspots. Note that
58 # ALWAYS_SEND_ALL_HOTSPOTS has some bugs; see ReadTheDocs (link
59 # above) for details.
60 #
61 # Since this is just for development purposes, it's convenient for us to send
62 # all the hotspots rather than any specific category.
63 if settings.ALWAYS_SEND_ALL_HOTSPOTS:
64 return [
65 {
66 "name": hotspot,
67 "title": str(ALL_HOTSPOTS[hotspot]["title"]),
68 "description": str(ALL_HOTSPOTS[hotspot]["description"]),
69 "delay": 0,
70 }
71 for hotspot in ALL_HOTSPOTS
72 ]
73
74 # If a Zulip server has disabled the tutorial, never send hotspots.
75 if not settings.TUTORIAL_ENABLED:
76 return []
77
78 if user.tutorial_status == UserProfile.TUTORIAL_FINISHED:
79 return []
80
81 seen_hotspots = frozenset(
82 UserHotspot.objects.filter(user=user).values_list("hotspot", flat=True)
83 )
84 for hotspot in INTRO_HOTSPOTS.keys():
85 if hotspot not in seen_hotspots:
86 return [
87 {
88 "name": hotspot,
89 "title": str(INTRO_HOTSPOTS[hotspot]["title"]),
90 "description": str(INTRO_HOTSPOTS[hotspot]["description"]),
91 "delay": 0.5,
92 }
93 ]
94
95 user.tutorial_status = UserProfile.TUTORIAL_FINISHED
96 user.save(update_fields=["tutorial_status"])
97 return []
98
99
100 def copy_hotspots(source_profile: UserProfile, target_profile: UserProfile) -> None:
101 for userhotspot in frozenset(UserHotspot.objects.filter(user=source_profile)):
102 UserHotspot.objects.create(
103 user=target_profile, hotspot=userhotspot.hotspot, timestamp=userhotspot.timestamp
104 )
105
106 target_profile.tutorial_status = source_profile.tutorial_status
107 target_profile.onboarding_steps = source_profile.onboarding_steps
108 target_profile.save(update_fields=["tutorial_status", "onboarding_steps"])
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zerver/lib/hotspots.py b/zerver/lib/hotspots.py
--- a/zerver/lib/hotspots.py
+++ b/zerver/lib/hotspots.py
@@ -9,10 +9,6 @@
from zerver.models import UserHotspot, UserProfile
INTRO_HOTSPOTS: Dict[str, Dict[str, Promise]] = {
- "intro_reply": {
- "title": gettext_lazy("Reply to a message"),
- "description": gettext_lazy("Click anywhere on a message to reply."),
- },
"intro_streams": {
"title": gettext_lazy("Catch up on a stream"),
"description": gettext_lazy(
| {"golden_diff": "diff --git a/zerver/lib/hotspots.py b/zerver/lib/hotspots.py\n--- a/zerver/lib/hotspots.py\n+++ b/zerver/lib/hotspots.py\n@@ -9,10 +9,6 @@\n from zerver.models import UserHotspot, UserProfile\n \n INTRO_HOTSPOTS: Dict[str, Dict[str, Promise]] = {\n- \"intro_reply\": {\n- \"title\": gettext_lazy(\"Reply to a message\"),\n- \"description\": gettext_lazy(\"Click anywhere on a message to reply.\"),\n- },\n \"intro_streams\": {\n \"title\": gettext_lazy(\"Catch up on a stream\"),\n \"description\": gettext_lazy(\n", "issue": "Remove \"Send a reply\" new user tip\nAfter implementing #19900, there are two places where new users are told how to reply to a message: in the Welcome Bot text and in the \"Send a reply\" new user tip immediately below.\r\n\r\nTo simplify and avoid redundancy, we should remove the \"Send a reply\" new user tip.\r\n\r\n<img width=\"909\" alt=\"Screen_Shot_2021-12-06_at_10_08_14_AM\" src=\"https://user-images.githubusercontent.com/2090066/144938995-080268ce-510d-4b76-b3c1-b691fbb814f4.png\">\r\n\r\n[CZO thread](https://chat.zulip.org/#narrow/stream/101-design/topic/.22click.20to.20reply.22.20whale)\n", "before_files": [{"content": "# See https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html\n# for documentation on this subsystem.\nfrom typing import Dict, List\n\nfrom django.conf import settings\nfrom django.utils.functional import Promise\nfrom django.utils.translation import gettext_lazy\n\nfrom zerver.models import UserHotspot, UserProfile\n\nINTRO_HOTSPOTS: Dict[str, Dict[str, Promise]] = {\n \"intro_reply\": {\n \"title\": gettext_lazy(\"Reply to a message\"),\n \"description\": gettext_lazy(\"Click anywhere on a message to reply.\"),\n },\n \"intro_streams\": {\n \"title\": gettext_lazy(\"Catch up on a stream\"),\n \"description\": gettext_lazy(\n \"Messages sent to a stream are seen by everyone subscribed \"\n \"to that stream. Try clicking on one of the stream links below.\"\n ),\n },\n \"intro_topics\": {\n \"title\": gettext_lazy(\"Topics\"),\n \"description\": gettext_lazy(\n \"Every message has a topic. Topics keep conversations \"\n \"easy to follow, and make it easy to reply to conversations that start \"\n \"while you are offline.\"\n ),\n },\n \"intro_gear\": {\n \"title\": gettext_lazy(\"Settings\"),\n \"description\": gettext_lazy(\n \"Go to Settings to configure your notifications and display settings.\"\n ),\n },\n \"intro_compose\": {\n \"title\": gettext_lazy(\"Compose\"),\n \"description\": gettext_lazy(\n \"Click here to start a new conversation. Pick a topic \"\n \"(2-3 words is best), and give it a go!\"\n ),\n },\n}\n\n# We would most likely implement new hotspots in the future that aren't\n# a part of the initial tutorial. To that end, classifying them into\n# categories which are aggregated in ALL_HOTSPOTS, seems like a good start.\nALL_HOTSPOTS: Dict[str, Dict[str, Promise]] = {\n **INTRO_HOTSPOTS,\n}\n\n\ndef get_next_hotspots(user: UserProfile) -> List[Dict[str, object]]:\n # For manual testing, it can be convenient to set\n # ALWAYS_SEND_ALL_HOTSPOTS=True in `zproject/dev_settings.py` to\n # make it easy to click on all of the hotspots. 
Note that\n # ALWAYS_SEND_ALL_HOTSPOTS has some bugs; see ReadTheDocs (link\n # above) for details.\n #\n # Since this is just for development purposes, it's convenient for us to send\n # all the hotspots rather than any specific category.\n if settings.ALWAYS_SEND_ALL_HOTSPOTS:\n return [\n {\n \"name\": hotspot,\n \"title\": str(ALL_HOTSPOTS[hotspot][\"title\"]),\n \"description\": str(ALL_HOTSPOTS[hotspot][\"description\"]),\n \"delay\": 0,\n }\n for hotspot in ALL_HOTSPOTS\n ]\n\n # If a Zulip server has disabled the tutorial, never send hotspots.\n if not settings.TUTORIAL_ENABLED:\n return []\n\n if user.tutorial_status == UserProfile.TUTORIAL_FINISHED:\n return []\n\n seen_hotspots = frozenset(\n UserHotspot.objects.filter(user=user).values_list(\"hotspot\", flat=True)\n )\n for hotspot in INTRO_HOTSPOTS.keys():\n if hotspot not in seen_hotspots:\n return [\n {\n \"name\": hotspot,\n \"title\": str(INTRO_HOTSPOTS[hotspot][\"title\"]),\n \"description\": str(INTRO_HOTSPOTS[hotspot][\"description\"]),\n \"delay\": 0.5,\n }\n ]\n\n user.tutorial_status = UserProfile.TUTORIAL_FINISHED\n user.save(update_fields=[\"tutorial_status\"])\n return []\n\n\ndef copy_hotspots(source_profile: UserProfile, target_profile: UserProfile) -> None:\n for userhotspot in frozenset(UserHotspot.objects.filter(user=source_profile)):\n UserHotspot.objects.create(\n user=target_profile, hotspot=userhotspot.hotspot, timestamp=userhotspot.timestamp\n )\n\n target_profile.tutorial_status = source_profile.tutorial_status\n target_profile.onboarding_steps = source_profile.onboarding_steps\n target_profile.save(update_fields=[\"tutorial_status\", \"onboarding_steps\"])\n", "path": "zerver/lib/hotspots.py"}], "after_files": [{"content": "# See https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html\n# for documentation on this subsystem.\nfrom typing import Dict, List\n\nfrom django.conf import settings\nfrom django.utils.functional import Promise\nfrom django.utils.translation import gettext_lazy\n\nfrom zerver.models import UserHotspot, UserProfile\n\nINTRO_HOTSPOTS: Dict[str, Dict[str, Promise]] = {\n \"intro_streams\": {\n \"title\": gettext_lazy(\"Catch up on a stream\"),\n \"description\": gettext_lazy(\n \"Messages sent to a stream are seen by everyone subscribed \"\n \"to that stream. Try clicking on one of the stream links below.\"\n ),\n },\n \"intro_topics\": {\n \"title\": gettext_lazy(\"Topics\"),\n \"description\": gettext_lazy(\n \"Every message has a topic. Topics keep conversations \"\n \"easy to follow, and make it easy to reply to conversations that start \"\n \"while you are offline.\"\n ),\n },\n \"intro_gear\": {\n \"title\": gettext_lazy(\"Settings\"),\n \"description\": gettext_lazy(\n \"Go to Settings to configure your notifications and display settings.\"\n ),\n },\n \"intro_compose\": {\n \"title\": gettext_lazy(\"Compose\"),\n \"description\": gettext_lazy(\n \"Click here to start a new conversation. Pick a topic \"\n \"(2-3 words is best), and give it a go!\"\n ),\n },\n}\n\n# We would most likely implement new hotspots in the future that aren't\n# a part of the initial tutorial. 
To that end, classifying them into\n# categories which are aggregated in ALL_HOTSPOTS, seems like a good start.\nALL_HOTSPOTS: Dict[str, Dict[str, Promise]] = {\n **INTRO_HOTSPOTS,\n}\n\n\ndef get_next_hotspots(user: UserProfile) -> List[Dict[str, object]]:\n # For manual testing, it can be convenient to set\n # ALWAYS_SEND_ALL_HOTSPOTS=True in `zproject/dev_settings.py` to\n # make it easy to click on all of the hotspots. Note that\n # ALWAYS_SEND_ALL_HOTSPOTS has some bugs; see ReadTheDocs (link\n # above) for details.\n #\n # Since this is just for development purposes, it's convenient for us to send\n # all the hotspots rather than any specific category.\n if settings.ALWAYS_SEND_ALL_HOTSPOTS:\n return [\n {\n \"name\": hotspot,\n \"title\": str(ALL_HOTSPOTS[hotspot][\"title\"]),\n \"description\": str(ALL_HOTSPOTS[hotspot][\"description\"]),\n \"delay\": 0,\n }\n for hotspot in ALL_HOTSPOTS\n ]\n\n # If a Zulip server has disabled the tutorial, never send hotspots.\n if not settings.TUTORIAL_ENABLED:\n return []\n\n if user.tutorial_status == UserProfile.TUTORIAL_FINISHED:\n return []\n\n seen_hotspots = frozenset(\n UserHotspot.objects.filter(user=user).values_list(\"hotspot\", flat=True)\n )\n for hotspot in INTRO_HOTSPOTS.keys():\n if hotspot not in seen_hotspots:\n return [\n {\n \"name\": hotspot,\n \"title\": str(INTRO_HOTSPOTS[hotspot][\"title\"]),\n \"description\": str(INTRO_HOTSPOTS[hotspot][\"description\"]),\n \"delay\": 0.5,\n }\n ]\n\n user.tutorial_status = UserProfile.TUTORIAL_FINISHED\n user.save(update_fields=[\"tutorial_status\"])\n return []\n\n\ndef copy_hotspots(source_profile: UserProfile, target_profile: UserProfile) -> None:\n for userhotspot in frozenset(UserHotspot.objects.filter(user=source_profile)):\n UserHotspot.objects.create(\n user=target_profile, hotspot=userhotspot.hotspot, timestamp=userhotspot.timestamp\n )\n\n target_profile.tutorial_status = source_profile.tutorial_status\n target_profile.onboarding_steps = source_profile.onboarding_steps\n target_profile.save(update_fields=[\"tutorial_status\", \"onboarding_steps\"])\n", "path": "zerver/lib/hotspots.py"}]} | 1,593 | 143 |
gh_patches_debug_9750 | rasdani/github-patches | git_diff | nilearn__nilearn-4310 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] MultiNiftiMapsMasker reports are empty
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Operating system
- [X] Linux
- [ ] Mac
- [ ] Windows
### Operating system version
Linux Ubuntu 22.04
### Python version
- [ ] 3.12
- [X] 3.11
- [ ] 3.10
- [ ] 3.9
- [ ] 3.8
### nilearn version
dev
### Expected behavior
to have non empty reports
### Current behavior & error messages
This is what I got:
[Screenshot: Screenshot from 2024-02-08 11-29-22]
[Screenshot: Screenshot from 2024-02-08 11-29-56]
### Steps and code to reproduce bug
```python
from pathlib import Path
from nilearn import datasets
from nilearn.maskers import (
MultiNiftiMapsMasker
)
REPORTS_DIR = Path().cwd()
difumo = datasets.fetch_atlas_difumo(
dimension=64, resolution_mm=2, legacy_format=False
)
data = datasets.fetch_development_fmri(n_subjects=2)
masker = MultiNiftiMapsMasker(
maps_img=difumo.maps,
standardize="zscore_sample",
standardize_confounds="zscore_sample",
memory="nilearn_cache",
n_jobs=2,
)
masker.fit()
report = masker.generate_report()
report.save_as_html(REPORTS_DIR / "multi_nifti_maps_masker_atlas.html")
_ = masker.fit_transform(data.func, confounds=data.confounds)
report = masker.generate_report()
report.save_as_html(REPORTS_DIR / "multi_nifti_maps_masker_fitted.html")
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nilearn/reporting/html_report.py`
Content:
```
1 """Generate HTML reports."""
2
3 import copy
4 import os
5 import warnings
6 from pathlib import Path
7 from string import Template
8
9 from nilearn.externals import tempita
10 from nilearn.maskers import NiftiSpheresMasker
11 from nilearn.plotting.html_document import HTMLDocument
12 from nilearn.reporting.utils import figure_to_svg_base64
13
14 ESTIMATOR_TEMPLATES = {
15 "NiftiLabelsMasker": "report_body_template_niftilabelsmasker.html",
16 "NiftiMapsMasker": "report_body_template_niftimapsmasker.html",
17 "NiftiSpheresMasker": "report_body_template_niftispheresmasker.html",
18 "default": "report_body_template.html",
19 }
20
21
22 def _get_estimator_template(estimator):
23 """Return the HTML template to use for a given estimator \
24 if a specific template was defined in ESTIMATOR_TEMPLATES, \
25 otherwise return the default template.
26
27 Parameters
28 ----------
29 estimator : object instance of BaseEstimator
30 The object we wish to retrieve template of.
31
32 Returns
33 -------
34 template : str
35 Name of the template file to use.
36
37 """
38 if estimator.__class__.__name__ in ESTIMATOR_TEMPLATES:
39 return ESTIMATOR_TEMPLATES[estimator.__class__.__name__]
40 else:
41 return ESTIMATOR_TEMPLATES["default"]
42
43
44 def _embed_img(display):
45 """Embed an image or just return its instance if already embedded.
46
47 Parameters
48 ----------
49 display : obj
50 A Nilearn plotting object to display.
51
52 Returns
53 -------
54 embed : str
55 Binary image string.
56
57 """
58 if display is None: # no image to display
59 return None
60 # If already embedded, simply return as is
61 if isinstance(display, str):
62 return display
63 return figure_to_svg_base64(display.frame_axes.figure)
64
65
66 def _str_params(params):
67 """Convert NoneType values to the string 'None' for display.
68
69 Parameters
70 ----------
71 params : dict
72 A dictionary of input values to a function.
73
74 """
75 params_str = copy.deepcopy(params)
76 for k, v in params_str.items():
77 if v is None:
78 params_str[k] = "None"
79 return params_str
80
81
82 def _update_template(
83 title, docstring, content, overlay, parameters, data, template_name=None
84 ):
85 """Populate a report with content.
86
87 Parameters
88 ----------
89 title : str
90 The title for the report.
91
92 docstring : str
93 The introductory docstring for the reported object.
94
95 content : img
96 The content to display.
97
98 overlay : img
99 Overlaid content, to appear on hover.
100
101 parameters : dict
102 A dictionary of object parameters and their values.
103
104 data : dict
105 A dictionary holding the data to be added to the report.
106 The keys must match exactly the ones used in the template.
107 The default template accepts the following:
108 - description (str) : Description of the content.
109 - warning_message (str) : An optional warning
110 message to be displayed in red. This is used
111 for example when no image was provided to the
112 estimator when fitting.
113 The NiftiLabelsMasker template accepts the additional
114 fields:
115 - summary (dict) : A summary description of the
116 region labels and sizes. This will be displayed
117 as an expandable table in the report.
118
119 template_name : str, optional
120 The name of the template to use. If not provided, the
121 default template `report_body_template.html` will be
122 used.
123
124 Returns
125 -------
126 report : HTMLReport
127 An instance of a populated HTML report.
128
129 """
130 resource_path = Path(__file__).resolve().parent.joinpath("data", "html")
131
132 if template_name is None:
133 body_template_name = "report_body_template.html"
134 else:
135 body_template_name = template_name
136 body_template_path = resource_path.joinpath(body_template_name)
137 if not os.path.exists(str(body_template_path)):
138 raise FileNotFoundError(f"No template {body_template_name}")
139 tpl = tempita.HTMLTemplate.from_filename(
140 str(body_template_path), encoding="utf-8"
141 )
142 body = tpl.substitute(
143 title=title,
144 content=content,
145 overlay=overlay,
146 docstring=docstring,
147 parameters=parameters,
148 **data,
149 )
150
151 head_template_name = "report_head_template.html"
152 head_template_path = resource_path.joinpath(head_template_name)
153 with open(str(head_template_path)) as head_file:
154 head_tpl = Template(head_file.read())
155
156 return HTMLReport(body=body, head_tpl=head_tpl)
157
158
159 def _define_overlay(estimator):
160 """Determine whether an overlay was provided and \
161 update the report text as appropriate."""
162 displays = estimator._reporting()
163
164 if len(displays) == 1: # set overlay to None
165 overlay, image = None, displays[0]
166
167 elif isinstance(estimator, NiftiSpheresMasker):
168 overlay, image = None, displays
169
170 elif len(displays) == 2:
171 overlay, image = displays[0], displays[1]
172
173 else:
174 overlay, image = None, displays
175
176 return overlay, image
177
178
179 def generate_report(estimator):
180 """Generate a report for Nilearn objects.
181
182 Reports are useful to visualize steps in a processing pipeline.
183 Example use case: visualize the overlap of a mask and reference image
184 in NiftiMasker.
185
186 Parameters
187 ----------
188 estimator : Object instance of BaseEstimator.
189 Object for which the report should be generated.
190
191 Returns
192 -------
193 report : HTMLReport
194
195 """
196 if hasattr(estimator, "_report_content"):
197 data = estimator._report_content
198 else:
199 data = {}
200 if not hasattr(estimator, "_reporting_data"):
201 warnings.warn(
202 "This object has not been fitted yet ! "
203 "Make sure to run `fit` before inspecting reports."
204 )
205 return _update_template(
206 title="Empty Report",
207 docstring=(
208 "This report was not generated. Please `fit` the object."
209 ),
210 content=_embed_img(None),
211 overlay=None,
212 parameters={},
213 data=data,
214 )
215
216 elif estimator._reporting_data is None:
217 warnings.warn(
218 "Report generation not enabled ! "
219 "No visual outputs will be created."
220 )
221 return _update_template(
222 title="Empty Report",
223 docstring=(
224 "This report was not "
225 "generated. Please check "
226 "that reporting is enabled."
227 ),
228 content=_embed_img(None),
229 overlay=None,
230 parameters={},
231 data=data,
232 )
233
234 return _create_report(estimator, data)
235
236
237 def _create_report(estimator, data):
238 html_template = _get_estimator_template(estimator)
239 overlay, image = _define_overlay(estimator)
240 embeded_images = (
241 [_embed_img(i) for i in image]
242 if isinstance(image, list)
243 else _embed_img(image)
244 )
245 parameters = _str_params(estimator.get_params())
246 docstring = estimator.__doc__
247 snippet = docstring.partition("Parameters\n ----------\n")[0]
248 return _update_template(
249 title=estimator.__class__.__name__,
250 docstring=snippet,
251 content=embeded_images,
252 overlay=_embed_img(overlay),
253 parameters=parameters,
254 data=data,
255 template_name=html_template,
256 )
257
258
259 class HTMLReport(HTMLDocument):
260 """A report written as HTML.
261
262 Methods such as ``save_as_html``, or ``open_in_browser``
263 are inherited from class ``nilearn.plotting.html_document.HTMLDocument``.
264
265 """
266
267 def __init__(self, head_tpl, body, head_values={}):
268 """Construct the ``HTMLReport`` class.
269
270 Parameters
271 ----------
272 head_tpl : Template
273 This is meant for display as a full page, eg writing on disk.
274 This is the Template object used to generate the HTML head
275 section of the report. The template should be filled with:
276
277 - title: The title of the HTML page.
278 - body: The full body of the HTML page. Provided through
279 the ``body`` input.
280
281 body : :obj:`str`
282 This parameter is used for embedding in the provided
283 ``head_tpl`` template. It contains the full body of the
284 HTML page.
285
286 head_values : :obj:`dict`, default={}
287 Additional substitutions in ``head_tpl``.
288
289 .. note::
290 This can be used to provide additional values
291 with custom templates.
292
293 """
294 html = head_tpl.safe_substitute(body=body, **head_values)
295 super().__init__(html)
296 self.head_tpl = head_tpl
297 self.body = body
298
299 def _repr_html_(self):
300 """Return body of the report.
301
302 Method used by the Jupyter notebook.
303 Users normally won't call this method explicitly.
304 """
305 return self.body
306
307 def __str__(self):
308 return self.body
309
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nilearn/reporting/html_report.py b/nilearn/reporting/html_report.py
--- a/nilearn/reporting/html_report.py
+++ b/nilearn/reporting/html_report.py
@@ -13,7 +13,9 @@
ESTIMATOR_TEMPLATES = {
"NiftiLabelsMasker": "report_body_template_niftilabelsmasker.html",
+ "MultiNiftiLabelsMasker": "report_body_template_niftilabelsmasker.html",
"NiftiMapsMasker": "report_body_template_niftimapsmasker.html",
+ "MultiNiftiMapsMasker": "report_body_template_niftimapsmasker.html",
"NiftiSpheresMasker": "report_body_template_niftispheresmasker.html",
"default": "report_body_template.html",
}
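The root cause is the exact-class-name lookup in `_get_estimator_template`: `MultiNiftiMapsMasker` (and `MultiNiftiLabelsMasker`) had no entry in `ESTIMATOR_TEMPLATES`, so report generation fell back to the generic default template and the masker-specific content never appeared. The patch maps both multi-subject maskers onto the same body templates as their single-subject counterparts, which can be verified directly (illustrative snippet, not part of the patch):

```python
from nilearn.reporting.html_report import ESTIMATOR_TEMPLATES

# The multi-subject maskers now reuse the single-subject report bodies.
assert ESTIMATOR_TEMPLATES["MultiNiftiMapsMasker"] == "report_body_template_niftimapsmasker.html"
assert ESTIMATOR_TEMPLATES["MultiNiftiLabelsMasker"] == "report_body_template_niftilabelsmasker.html"
```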
| {"golden_diff": "diff --git a/nilearn/reporting/html_report.py b/nilearn/reporting/html_report.py\n--- a/nilearn/reporting/html_report.py\n+++ b/nilearn/reporting/html_report.py\n@@ -13,7 +13,9 @@\n \n ESTIMATOR_TEMPLATES = {\n \"NiftiLabelsMasker\": \"report_body_template_niftilabelsmasker.html\",\n+ \"MultiNiftiLabelsMasker\": \"report_body_template_niftilabelsmasker.html\",\n \"NiftiMapsMasker\": \"report_body_template_niftimapsmasker.html\",\n+ \"MultiNiftiMapsMasker\": \"report_body_template_niftimapsmasker.html\",\n \"NiftiSpheresMasker\": \"report_body_template_niftispheresmasker.html\",\n \"default\": \"report_body_template.html\",\n }\n", "issue": "[BUG] MultiNiftiMapsMasker reports are empty\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Operating system\n\n- [X] Linux\n- [ ] Mac\n- [ ] Windows\n\n### Operating system version\n\nFor example one of the following:\r\n- Linux Ubuntu 22.04\r\n\r\n\n\n### Python version\n\n- [ ] 3.12\n- [X] 3.11\n- [ ] 3.10\n- [ ] 3.9\n- [ ] 3.8\n\n### nilearn version\n\ndev\n\n### Expected behavior\n\nto have non empty reports\n\n### Current behavior & error messages\n\nThis is what I got:\r\n\r\n\r\n\r\n\r\n\r\n\n\n### Steps and code to reproduce bug\n\n```python\r\nfrom pathlib import Path\r\nfrom nilearn import datasets\r\nfrom nilearn.maskers import (\r\n MultiNiftiMapsMasker\r\n)\r\n\r\nREPORTS_DIR = Path().cwd()\r\n\r\ndifumo = datasets.fetch_atlas_difumo(\r\n dimension=64, resolution_mm=2, legacy_format=False\r\n)\r\n\r\ndata = datasets.fetch_development_fmri(n_subjects=2)\r\n\r\nmasker = MultiNiftiMapsMasker(\r\n maps_img=difumo.maps,\r\n standardize=\"zscore_sample\",\r\n standardize_confounds=\"zscore_sample\",\r\n memory=\"nilearn_cache\",\r\n n_jobs=2,\r\n)\r\n\r\nmasker.fit()\r\nreport = masker.generate_report()\r\nreport.save_as_html(REPORTS_DIR / \"multi_nifti_maps_masker_atlas.html\")\r\n\r\n_ = masker.fit_transform(data.func, confounds=data.confounds)\r\nreport = masker.generate_report()\r\nreport.save_as_html(REPORTS_DIR / \"multi_nifti_maps_masker_fitted.html\")\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Generate HTML reports.\"\"\"\n\nimport copy\nimport os\nimport warnings\nfrom pathlib import Path\nfrom string import Template\n\nfrom nilearn.externals import tempita\nfrom nilearn.maskers import NiftiSpheresMasker\nfrom nilearn.plotting.html_document import HTMLDocument\nfrom nilearn.reporting.utils import figure_to_svg_base64\n\nESTIMATOR_TEMPLATES = {\n \"NiftiLabelsMasker\": \"report_body_template_niftilabelsmasker.html\",\n \"NiftiMapsMasker\": \"report_body_template_niftimapsmasker.html\",\n \"NiftiSpheresMasker\": \"report_body_template_niftispheresmasker.html\",\n \"default\": \"report_body_template.html\",\n}\n\n\ndef _get_estimator_template(estimator):\n \"\"\"Return the HTML template to use for a given estimator \\\n if a specific template was defined in ESTIMATOR_TEMPLATES, \\\n otherwise return the default template.\n\n Parameters\n ----------\n estimator : object instance of BaseEstimator\n The object we wish to retrieve template of.\n\n Returns\n -------\n template : str\n Name of the template file to use.\n\n \"\"\"\n if estimator.__class__.__name__ in ESTIMATOR_TEMPLATES:\n return ESTIMATOR_TEMPLATES[estimator.__class__.__name__]\n else:\n return ESTIMATOR_TEMPLATES[\"default\"]\n\n\ndef _embed_img(display):\n \"\"\"Embed an image or just return its instance if already embedded.\n\n Parameters\n ----------\n display : obj\n A Nilearn plotting object to 
display.\n\n Returns\n -------\n embed : str\n Binary image string.\n\n \"\"\"\n if display is None: # no image to display\n return None\n # If already embedded, simply return as is\n if isinstance(display, str):\n return display\n return figure_to_svg_base64(display.frame_axes.figure)\n\n\ndef _str_params(params):\n \"\"\"Convert NoneType values to the string 'None' for display.\n\n Parameters\n ----------\n params : dict\n A dictionary of input values to a function.\n\n \"\"\"\n params_str = copy.deepcopy(params)\n for k, v in params_str.items():\n if v is None:\n params_str[k] = \"None\"\n return params_str\n\n\ndef _update_template(\n title, docstring, content, overlay, parameters, data, template_name=None\n):\n \"\"\"Populate a report with content.\n\n Parameters\n ----------\n title : str\n The title for the report.\n\n docstring : str\n The introductory docstring for the reported object.\n\n content : img\n The content to display.\n\n overlay : img\n Overlaid content, to appear on hover.\n\n parameters : dict\n A dictionary of object parameters and their values.\n\n data : dict\n A dictionary holding the data to be added to the report.\n The keys must match exactly the ones used in the template.\n The default template accepts the following:\n - description (str) : Description of the content.\n - warning_message (str) : An optional warning\n message to be displayed in red. This is used\n for example when no image was provided to the\n estimator when fitting.\n The NiftiLabelsMasker template accepts the additional\n fields:\n - summary (dict) : A summary description of the\n region labels and sizes. This will be displayed\n as an expandable table in the report.\n\n template_name : str, optional\n The name of the template to use. If not provided, the\n default template `report_body_template.html` will be\n used.\n\n Returns\n -------\n report : HTMLReport\n An instance of a populated HTML report.\n\n \"\"\"\n resource_path = Path(__file__).resolve().parent.joinpath(\"data\", \"html\")\n\n if template_name is None:\n body_template_name = \"report_body_template.html\"\n else:\n body_template_name = template_name\n body_template_path = resource_path.joinpath(body_template_name)\n if not os.path.exists(str(body_template_path)):\n raise FileNotFoundError(f\"No template {body_template_name}\")\n tpl = tempita.HTMLTemplate.from_filename(\n str(body_template_path), encoding=\"utf-8\"\n )\n body = tpl.substitute(\n title=title,\n content=content,\n overlay=overlay,\n docstring=docstring,\n parameters=parameters,\n **data,\n )\n\n head_template_name = \"report_head_template.html\"\n head_template_path = resource_path.joinpath(head_template_name)\n with open(str(head_template_path)) as head_file:\n head_tpl = Template(head_file.read())\n\n return HTMLReport(body=body, head_tpl=head_tpl)\n\n\ndef _define_overlay(estimator):\n \"\"\"Determine whether an overlay was provided and \\\n update the report text as appropriate.\"\"\"\n displays = estimator._reporting()\n\n if len(displays) == 1: # set overlay to None\n overlay, image = None, displays[0]\n\n elif isinstance(estimator, NiftiSpheresMasker):\n overlay, image = None, displays\n\n elif len(displays) == 2:\n overlay, image = displays[0], displays[1]\n\n else:\n overlay, image = None, displays\n\n return overlay, image\n\n\ndef generate_report(estimator):\n \"\"\"Generate a report for Nilearn objects.\n\n Reports are useful to visualize steps in a processing pipeline.\n Example use case: visualize the overlap of a mask and reference image\n in 
NiftiMasker.\n\n Parameters\n ----------\n estimator : Object instance of BaseEstimator.\n Object for which the report should be generated.\n\n Returns\n -------\n report : HTMLReport\n\n \"\"\"\n if hasattr(estimator, \"_report_content\"):\n data = estimator._report_content\n else:\n data = {}\n if not hasattr(estimator, \"_reporting_data\"):\n warnings.warn(\n \"This object has not been fitted yet ! \"\n \"Make sure to run `fit` before inspecting reports.\"\n )\n return _update_template(\n title=\"Empty Report\",\n docstring=(\n \"This report was not generated. Please `fit` the object.\"\n ),\n content=_embed_img(None),\n overlay=None,\n parameters={},\n data=data,\n )\n\n elif estimator._reporting_data is None:\n warnings.warn(\n \"Report generation not enabled ! \"\n \"No visual outputs will be created.\"\n )\n return _update_template(\n title=\"Empty Report\",\n docstring=(\n \"This report was not \"\n \"generated. Please check \"\n \"that reporting is enabled.\"\n ),\n content=_embed_img(None),\n overlay=None,\n parameters={},\n data=data,\n )\n\n return _create_report(estimator, data)\n\n\ndef _create_report(estimator, data):\n html_template = _get_estimator_template(estimator)\n overlay, image = _define_overlay(estimator)\n embeded_images = (\n [_embed_img(i) for i in image]\n if isinstance(image, list)\n else _embed_img(image)\n )\n parameters = _str_params(estimator.get_params())\n docstring = estimator.__doc__\n snippet = docstring.partition(\"Parameters\\n ----------\\n\")[0]\n return _update_template(\n title=estimator.__class__.__name__,\n docstring=snippet,\n content=embeded_images,\n overlay=_embed_img(overlay),\n parameters=parameters,\n data=data,\n template_name=html_template,\n )\n\n\nclass HTMLReport(HTMLDocument):\n \"\"\"A report written as HTML.\n\n Methods such as ``save_as_html``, or ``open_in_browser``\n are inherited from class ``nilearn.plotting.html_document.HTMLDocument``.\n\n \"\"\"\n\n def __init__(self, head_tpl, body, head_values={}):\n \"\"\"Construct the ``HTMLReport`` class.\n\n Parameters\n ----------\n head_tpl : Template\n This is meant for display as a full page, eg writing on disk.\n This is the Template object used to generate the HTML head\n section of the report. The template should be filled with:\n\n - title: The title of the HTML page.\n - body: The full body of the HTML page. Provided through\n the ``body`` input.\n\n body : :obj:`str`\n This parameter is used for embedding in the provided\n ``head_tpl`` template. It contains the full body of the\n HTML page.\n\n head_values : :obj:`dict`, default={}\n Additional substitutions in ``head_tpl``.\n\n .. 
note::\n This can be used to provide additional values\n with custom templates.\n\n \"\"\"\n html = head_tpl.safe_substitute(body=body, **head_values)\n super().__init__(html)\n self.head_tpl = head_tpl\n self.body = body\n\n def _repr_html_(self):\n \"\"\"Return body of the report.\n\n Method used by the Jupyter notebook.\n Users normally won't call this method explicitly.\n \"\"\"\n return self.body\n\n def __str__(self):\n return self.body\n", "path": "nilearn/reporting/html_report.py"}], "after_files": [{"content": "\"\"\"Generate HTML reports.\"\"\"\n\nimport copy\nimport os\nimport warnings\nfrom pathlib import Path\nfrom string import Template\n\nfrom nilearn.externals import tempita\nfrom nilearn.maskers import NiftiSpheresMasker\nfrom nilearn.plotting.html_document import HTMLDocument\nfrom nilearn.reporting.utils import figure_to_svg_base64\n\nESTIMATOR_TEMPLATES = {\n \"NiftiLabelsMasker\": \"report_body_template_niftilabelsmasker.html\",\n \"MultiNiftiLabelsMasker\": \"report_body_template_niftilabelsmasker.html\",\n \"NiftiMapsMasker\": \"report_body_template_niftimapsmasker.html\",\n \"MultiNiftiMapsMasker\": \"report_body_template_niftimapsmasker.html\",\n \"NiftiSpheresMasker\": \"report_body_template_niftispheresmasker.html\",\n \"default\": \"report_body_template.html\",\n}\n\n\ndef _get_estimator_template(estimator):\n \"\"\"Return the HTML template to use for a given estimator \\\n if a specific template was defined in ESTIMATOR_TEMPLATES, \\\n otherwise return the default template.\n\n Parameters\n ----------\n estimator : object instance of BaseEstimator\n The object we wish to retrieve template of.\n\n Returns\n -------\n template : str\n Name of the template file to use.\n\n \"\"\"\n if estimator.__class__.__name__ in ESTIMATOR_TEMPLATES:\n return ESTIMATOR_TEMPLATES[estimator.__class__.__name__]\n else:\n return ESTIMATOR_TEMPLATES[\"default\"]\n\n\ndef _embed_img(display):\n \"\"\"Embed an image or just return its instance if already embedded.\n\n Parameters\n ----------\n display : obj\n A Nilearn plotting object to display.\n\n Returns\n -------\n embed : str\n Binary image string.\n\n \"\"\"\n if display is None: # no image to display\n return None\n # If already embedded, simply return as is\n if isinstance(display, str):\n return display\n return figure_to_svg_base64(display.frame_axes.figure)\n\n\ndef _str_params(params):\n \"\"\"Convert NoneType values to the string 'None' for display.\n\n Parameters\n ----------\n params : dict\n A dictionary of input values to a function.\n\n \"\"\"\n params_str = copy.deepcopy(params)\n for k, v in params_str.items():\n if v is None:\n params_str[k] = \"None\"\n return params_str\n\n\ndef _update_template(\n title, docstring, content, overlay, parameters, data, template_name=None\n):\n \"\"\"Populate a report with content.\n\n Parameters\n ----------\n title : str\n The title for the report.\n\n docstring : str\n The introductory docstring for the reported object.\n\n content : img\n The content to display.\n\n overlay : img\n Overlaid content, to appear on hover.\n\n parameters : dict\n A dictionary of object parameters and their values.\n\n data : dict\n A dictionary holding the data to be added to the report.\n The keys must match exactly the ones used in the template.\n The default template accepts the following:\n - description (str) : Description of the content.\n - warning_message (str) : An optional warning\n message to be displayed in red. 
This is used\n for example when no image was provided to the\n estimator when fitting.\n The NiftiLabelsMasker template accepts the additional\n fields:\n - summary (dict) : A summary description of the\n region labels and sizes. This will be displayed\n as an expandable table in the report.\n\n template_name : str, optional\n The name of the template to use. If not provided, the\n default template `report_body_template.html` will be\n used.\n\n Returns\n -------\n report : HTMLReport\n An instance of a populated HTML report.\n\n \"\"\"\n resource_path = Path(__file__).resolve().parent.joinpath(\"data\", \"html\")\n\n if template_name is None:\n body_template_name = \"report_body_template.html\"\n else:\n body_template_name = template_name\n body_template_path = resource_path.joinpath(body_template_name)\n if not os.path.exists(str(body_template_path)):\n raise FileNotFoundError(f\"No template {body_template_name}\")\n tpl = tempita.HTMLTemplate.from_filename(\n str(body_template_path), encoding=\"utf-8\"\n )\n body = tpl.substitute(\n title=title,\n content=content,\n overlay=overlay,\n docstring=docstring,\n parameters=parameters,\n **data,\n )\n\n head_template_name = \"report_head_template.html\"\n head_template_path = resource_path.joinpath(head_template_name)\n with open(str(head_template_path)) as head_file:\n head_tpl = Template(head_file.read())\n\n return HTMLReport(body=body, head_tpl=head_tpl)\n\n\ndef _define_overlay(estimator):\n \"\"\"Determine whether an overlay was provided and \\\n update the report text as appropriate.\"\"\"\n displays = estimator._reporting()\n\n if len(displays) == 1: # set overlay to None\n overlay, image = None, displays[0]\n\n elif isinstance(estimator, NiftiSpheresMasker):\n overlay, image = None, displays\n\n elif len(displays) == 2:\n overlay, image = displays[0], displays[1]\n\n else:\n overlay, image = None, displays\n\n return overlay, image\n\n\ndef generate_report(estimator):\n \"\"\"Generate a report for Nilearn objects.\n\n Reports are useful to visualize steps in a processing pipeline.\n Example use case: visualize the overlap of a mask and reference image\n in NiftiMasker.\n\n Parameters\n ----------\n estimator : Object instance of BaseEstimator.\n Object for which the report should be generated.\n\n Returns\n -------\n report : HTMLReport\n\n \"\"\"\n if hasattr(estimator, \"_report_content\"):\n data = estimator._report_content\n else:\n data = {}\n if not hasattr(estimator, \"_reporting_data\"):\n warnings.warn(\n \"This object has not been fitted yet ! \"\n \"Make sure to run `fit` before inspecting reports.\"\n )\n return _update_template(\n title=\"Empty Report\",\n docstring=(\n \"This report was not generated. Please `fit` the object.\"\n ),\n content=_embed_img(None),\n overlay=None,\n parameters={},\n data=data,\n )\n\n elif estimator._reporting_data is None:\n warnings.warn(\n \"Report generation not enabled ! \"\n \"No visual outputs will be created.\"\n )\n return _update_template(\n title=\"Empty Report\",\n docstring=(\n \"This report was not \"\n \"generated. 
Please check \"\n \"that reporting is enabled.\"\n ),\n content=_embed_img(None),\n overlay=None,\n parameters={},\n data=data,\n )\n\n return _create_report(estimator, data)\n\n\ndef _create_report(estimator, data):\n html_template = _get_estimator_template(estimator)\n overlay, image = _define_overlay(estimator)\n embeded_images = (\n [_embed_img(i) for i in image]\n if isinstance(image, list)\n else _embed_img(image)\n )\n parameters = _str_params(estimator.get_params())\n docstring = estimator.__doc__\n snippet = docstring.partition(\"Parameters\\n ----------\\n\")[0]\n return _update_template(\n title=estimator.__class__.__name__,\n docstring=snippet,\n content=embeded_images,\n overlay=_embed_img(overlay),\n parameters=parameters,\n data=data,\n template_name=html_template,\n )\n\n\nclass HTMLReport(HTMLDocument):\n \"\"\"A report written as HTML.\n\n Methods such as ``save_as_html``, or ``open_in_browser``\n are inherited from class ``nilearn.plotting.html_document.HTMLDocument``.\n\n \"\"\"\n\n def __init__(self, head_tpl, body, head_values={}):\n \"\"\"Construct the ``HTMLReport`` class.\n\n Parameters\n ----------\n head_tpl : Template\n This is meant for display as a full page, eg writing on disk.\n This is the Template object used to generate the HTML head\n section of the report. The template should be filled with:\n\n - title: The title of the HTML page.\n - body: The full body of the HTML page. Provided through\n the ``body`` input.\n\n body : :obj:`str`\n This parameter is used for embedding in the provided\n ``head_tpl`` template. It contains the full body of the\n HTML page.\n\n head_values : :obj:`dict`, default={}\n Additional substitutions in ``head_tpl``.\n\n .. note::\n This can be used to provide additional values\n with custom templates.\n\n \"\"\"\n html = head_tpl.safe_substitute(body=body, **head_values)\n super().__init__(html)\n self.head_tpl = head_tpl\n self.body = body\n\n def _repr_html_(self):\n \"\"\"Return body of the report.\n\n Method used by the Jupyter notebook.\n Users normally won't call this method explicitly.\n \"\"\"\n return self.body\n\n def __str__(self):\n return self.body\n", "path": "nilearn/reporting/html_report.py"}]} | 3,557 | 188 |
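The nilearn patch in the row above fixes the empty `MultiNiftiMapsMasker`/`MultiNiftiLabelsMasker` reports purely at the template-lookup level: the Multi variants are mapped onto the same report templates as their single-subject counterparts. A minimal, self-contained sketch of that lookup pattern is below; the dictionary keys and file names are taken from the row, while the stand-in class exists only to make the example runnable.

```python
# Class-name -> report-template lookup with a "default" fallback, mirroring
# the ESTIMATOR_TEMPLATES fix from the nilearn row above (sketch only).
ESTIMATOR_TEMPLATES = {
    "NiftiLabelsMasker": "report_body_template_niftilabelsmasker.html",
    "MultiNiftiLabelsMasker": "report_body_template_niftilabelsmasker.html",
    "NiftiMapsMasker": "report_body_template_niftimapsmasker.html",
    "MultiNiftiMapsMasker": "report_body_template_niftimapsmasker.html",
    "default": "report_body_template.html",
}


def get_estimator_template(estimator) -> str:
    """Return the template registered for the estimator's class, else the default."""
    name = estimator.__class__.__name__
    return ESTIMATOR_TEMPLATES.get(name, ESTIMATOR_TEMPLATES["default"])


class MultiNiftiMapsMasker:
    """Stand-in for nilearn.maskers.MultiNiftiMapsMasker, for this sketch only."""


if __name__ == "__main__":
    # Before the patch this class name was missing from the mapping, so the
    # generic template was used and the generated report came out empty.
    print(get_estimator_template(MultiNiftiMapsMasker()))
```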
gh_patches_debug_27721 | rasdani/github-patches | git_diff | Flexget__Flexget-2513 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Notify (Pushbullet) Plugin Error.
Hi, since last week I have been getting this error in my log. Up to this point everything worked without problems.
```
2019-11-13 10:30 ERROR notify_entry NexBox 'x-ratelimit-reset'
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/flexget/components/notify/notify.py", line 104, in send_notification
send_notification(*args, **kwargs)
File "/usr/local/lib/python3.5/dist-packages/flexget/components/notify/notification_framework.py", line 124, in send_notification
title, message, rendered_config
File "/usr/local/lib/python3.5/dist-packages/flexget/components/notify/notifiers/pushbullet.py", line 89, in notify
self.send_push(key, title, message, config.get('url'), d, 'device_iden')
File "/usr/local/lib/python3.5/dist-packages/flexget/components/notify/notifiers/pushbullet.py", line 130, in send_push
int(response.headers['X-Ratelimit-Reset'])
File "/usr/local/lib/python3.5/dist-packages/requests/structures.py", line 52, in __getitem__
return self._store[key.lower()][1]
KeyError: 'x-ratelimit-reset'
```
Flexget: 2.21.32
API: 1.5.0
Same with Flexget 3.0.8 and Python 3.8
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flexget/components/notify/notifiers/pushbullet.py`
Content:
```
1 import base64
2 import datetime
3 import logging
4
5 from requests.exceptions import RequestException
6
7 from flexget import plugin
8 from flexget.config_schema import one_or_more
9 from flexget.event import event
10 from flexget.plugin import PluginWarning
11 from flexget.utils.requests import Session as RequestSession
12 from flexget.utils.requests import TimedLimiter
13
14 plugin_name = 'pushbullet'
15 log = logging.getLogger(plugin_name)
16
17 PUSHBULLET_URL = 'https://api.pushbullet.com/v2/pushes'
18
19 requests = RequestSession(max_retries=3)
20 requests.add_domain_limiter(TimedLimiter('pushbullet.com', '5 seconds'))
21
22
23 class PushbulletNotifier:
24 """
25 Example::
26
27 notify:
28 entries:
29 via:
30 pushbullet:
31 apikey: <API_KEY>
32 [device: <DEVICE_IDEN> (can also be a list of device ids, or don't specify any ids to send to all devices)]
33 [email: <EMAIL_ADDRESS> (can also be a list of user email addresses)]
34 [channel: <CHANNEL_TAG> (you can only specify device / email or channel tag, cannot use both)]
35
36 Configuration parameters are also supported from entries (eg. through set).
37 """
38
39 schema = {
40 'type': 'object',
41 'properties': {
42 'api_key': one_or_more({'type': 'string'}),
43 'device': one_or_more({'type': 'string'}),
44 'email': one_or_more({'type': 'string', 'format': 'email'}),
45 'url': {'type': 'string'},
46 'channel': {'type': 'string'},
47 'file_template': {'type': 'string'},
48 },
49 'required': ['api_key'],
50 'oneOf': [
51 {'required': ['device']},
52 {'required': ['channel']},
53 {'required': ['email']},
54 {
55 'not': {
56 'anyOf': [
57 {'required': ['device']},
58 {'required': ['channel']},
59 {'required': ['email']},
60 ]
61 }
62 },
63 ],
64 'error_oneOf': 'One (and only one) of `email`, `device` or `channel` are allowed.',
65 'additionalProperties': False,
66 }
67
68 def notify(self, title, message, config):
69 """
70 Send a Pushbullet notification
71 """
72 if config.get('device') and not isinstance(config['device'], list):
73 config['device'] = [config['device']]
74
75 if config.get('email') and not isinstance(config['email'], list):
76 config['email'] = [config['email']]
77
78 if not isinstance(config['api_key'], list):
79 config['api_key'] = [config['api_key']]
80
81 for key in config['api_key']:
82 if config.get('channel'):
83 self.send_push(
84 key, title, message, config.get('url'), config.get('channel'), 'channel_tag'
85 )
86 elif config.get('device'):
87 for d in config['device']:
88 self.send_push(key, title, message, config.get('url'), d, 'device_iden')
89 elif config.get('email'):
90 for e in config['email']:
91 self.send_push(key, title, message, config.get('url'), e, 'email')
92 else:
93 self.send_push(key, title, message, config.get('url'))
94
95 def send_push(self, api_key, title, body, url=None, destination=None, destination_type=None):
96 push_type = 'link' if url else 'note'
97 notification = {'type': push_type, 'title': title, 'body': body}
98 if url:
99 notification['url'] = url
100 if destination:
101 notification[destination_type] = destination
102
103 # Make the request
104 headers = {
105 'Authorization': b'Basic ' + base64.b64encode(api_key.encode('ascii')),
106 'Content-Type': 'application/json',
107 'Accept': 'application/json',
108 'User-Agent': 'Flexget',
109 }
110 try:
111 response = requests.post(PUSHBULLET_URL, headers=headers, json=notification)
112 except RequestException as e:
113 if e.response is not None:
114 if e.response.status_code == 429:
115 reset_time = e.response.headers.get('X-Ratelimit-Reset')
116 if reset_time:
117 reset_time = datetime.datetime.fromtimestamp(int(reset_time)).strftime(
118 '%Y-%m-%d %H:%M:%S'
119 )
120 message = f'Monthly Pushbullet database operations limit reached. Next reset: {reset_time}'
121 else:
122 message = e.response.json()['error']['message']
123 else:
124 message = str(e)
125 raise PluginWarning(message)
126
127 reset_time = datetime.datetime.fromtimestamp(
128 int(response.headers['X-Ratelimit-Reset'])
129 ).strftime('%Y-%m-%d %H:%M:%S')
130 remaining = response.headers['X-Ratelimit-Remaining']
131 log.debug(
132 'Pushbullet notification sent. Database operations remaining until next reset: %s. '
133 'Next reset at: %s',
134 remaining,
135 reset_time,
136 )
137
138
139 @event('plugin.register')
140 def register_plugin():
141 plugin.register(PushbulletNotifier, plugin_name, api_ver=2, interfaces=['notifiers'])
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flexget/components/notify/notifiers/pushbullet.py b/flexget/components/notify/notifiers/pushbullet.py
--- a/flexget/components/notify/notifiers/pushbullet.py
+++ b/flexget/components/notify/notifiers/pushbullet.py
@@ -92,7 +92,8 @@
else:
self.send_push(key, title, message, config.get('url'))
- def send_push(self, api_key, title, body, url=None, destination=None, destination_type=None):
+ @staticmethod
+ def send_push(api_key, title, body, url=None, destination=None, destination_type=None):
push_type = 'link' if url else 'note'
notification = {'type': push_type, 'title': title, 'body': body}
if url:
@@ -124,16 +125,16 @@
message = str(e)
raise PluginWarning(message)
- reset_time = datetime.datetime.fromtimestamp(
- int(response.headers['X-Ratelimit-Reset'])
- ).strftime('%Y-%m-%d %H:%M:%S')
- remaining = response.headers['X-Ratelimit-Remaining']
- log.debug(
- 'Pushbullet notification sent. Database operations remaining until next reset: %s. '
- 'Next reset at: %s',
- remaining,
- reset_time,
- )
+ reset_time = response.headers.get('X-Ratelimit-Reset')
+ remaining = response.headers.get('X-Ratelimit-Remaining')
+ if reset_time and remaining:
+ reset_time = datetime.datetime.fromtimestamp(int(reset_time))
+ log.debug(
+ 'Pushbullet notification sent. Database operations remaining until next reset: %s. '
+ 'Next reset at: %s',
+ remaining,
+ reset_time,
+ )
@event('plugin.register')
| {"golden_diff": "diff --git a/flexget/components/notify/notifiers/pushbullet.py b/flexget/components/notify/notifiers/pushbullet.py\n--- a/flexget/components/notify/notifiers/pushbullet.py\n+++ b/flexget/components/notify/notifiers/pushbullet.py\n@@ -92,7 +92,8 @@\n else:\n self.send_push(key, title, message, config.get('url'))\n \n- def send_push(self, api_key, title, body, url=None, destination=None, destination_type=None):\n+ @staticmethod\n+ def send_push(api_key, title, body, url=None, destination=None, destination_type=None):\n push_type = 'link' if url else 'note'\n notification = {'type': push_type, 'title': title, 'body': body}\n if url:\n@@ -124,16 +125,16 @@\n message = str(e)\n raise PluginWarning(message)\n \n- reset_time = datetime.datetime.fromtimestamp(\n- int(response.headers['X-Ratelimit-Reset'])\n- ).strftime('%Y-%m-%d %H:%M:%S')\n- remaining = response.headers['X-Ratelimit-Remaining']\n- log.debug(\n- 'Pushbullet notification sent. Database operations remaining until next reset: %s. '\n- 'Next reset at: %s',\n- remaining,\n- reset_time,\n- )\n+ reset_time = response.headers.get('X-Ratelimit-Reset')\n+ remaining = response.headers.get('X-Ratelimit-Remaining')\n+ if reset_time and remaining:\n+ reset_time = datetime.datetime.fromtimestamp(int(reset_time))\n+ log.debug(\n+ 'Pushbullet notification sent. Database operations remaining until next reset: %s. '\n+ 'Next reset at: %s',\n+ remaining,\n+ reset_time,\n+ )\n \n \n @event('plugin.register')\n", "issue": "Notify (Pushbullet) Plugin Error.\nHi, since last week i have this error in my log. Up to this point everything worked without problems.\r\n```\r\n2019-11-13 10:30 ERROR notify_entry NexBox 'x-ratelimit-reset'\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.5/dist-packages/flexget/components/notify/notify.py\", line 104, in send_notification\r\n send_notification(*args, **kwargs)\r\n File \"/usr/local/lib/python3.5/dist-packages/flexget/components/notify/notification_framework.py\", line 124, in send_notification\r\n title, message, rendered_config\r\n File \"/usr/local/lib/python3.5/dist-packages/flexget/components/notify/notifiers/pushbullet.py\", line 89, in notify\r\n self.send_push(key, title, message, config.get('url'), d, 'device_iden')\r\n File \"/usr/local/lib/python3.5/dist-packages/flexget/components/notify/notifiers/pushbullet.py\", line 130, in send_push\r\n int(response.headers['X-Ratelimit-Reset'])\r\n File \"/usr/local/lib/python3.5/dist-packages/requests/structures.py\", line 52, in __getitem__\r\n return self._store[key.lower()][1]\r\nKeyError: 'x-ratelimit-reset'\r\n```\r\nFlexget: 2.21.32\r\nAPI: 1.5.0\r\n\r\nSame with Flexget 3.0.8 and Python 3.8\n", "before_files": [{"content": "import base64\nimport datetime\nimport logging\n\nfrom requests.exceptions import RequestException\n\nfrom flexget import plugin\nfrom flexget.config_schema import one_or_more\nfrom flexget.event import event\nfrom flexget.plugin import PluginWarning\nfrom flexget.utils.requests import Session as RequestSession\nfrom flexget.utils.requests import TimedLimiter\n\nplugin_name = 'pushbullet'\nlog = logging.getLogger(plugin_name)\n\nPUSHBULLET_URL = 'https://api.pushbullet.com/v2/pushes'\n\nrequests = RequestSession(max_retries=3)\nrequests.add_domain_limiter(TimedLimiter('pushbullet.com', '5 seconds'))\n\n\nclass PushbulletNotifier:\n \"\"\"\n Example::\n\n notify:\n entries:\n via:\n pushbullet:\n apikey: <API_KEY>\n [device: <DEVICE_IDEN> (can also be a list of device ids, or don't specify any 
ids to send to all devices)]\n [email: <EMAIL_ADDRESS> (can also be a list of user email addresses)]\n [channel: <CHANNEL_TAG> (you can only specify device / email or channel tag, cannot use both)]\n\n Configuration parameters are also supported from entries (eg. through set).\n \"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'api_key': one_or_more({'type': 'string'}),\n 'device': one_or_more({'type': 'string'}),\n 'email': one_or_more({'type': 'string', 'format': 'email'}),\n 'url': {'type': 'string'},\n 'channel': {'type': 'string'},\n 'file_template': {'type': 'string'},\n },\n 'required': ['api_key'],\n 'oneOf': [\n {'required': ['device']},\n {'required': ['channel']},\n {'required': ['email']},\n {\n 'not': {\n 'anyOf': [\n {'required': ['device']},\n {'required': ['channel']},\n {'required': ['email']},\n ]\n }\n },\n ],\n 'error_oneOf': 'One (and only one) of `email`, `device` or `channel` are allowed.',\n 'additionalProperties': False,\n }\n\n def notify(self, title, message, config):\n \"\"\"\n Send a Pushbullet notification\n \"\"\"\n if config.get('device') and not isinstance(config['device'], list):\n config['device'] = [config['device']]\n\n if config.get('email') and not isinstance(config['email'], list):\n config['email'] = [config['email']]\n\n if not isinstance(config['api_key'], list):\n config['api_key'] = [config['api_key']]\n\n for key in config['api_key']:\n if config.get('channel'):\n self.send_push(\n key, title, message, config.get('url'), config.get('channel'), 'channel_tag'\n )\n elif config.get('device'):\n for d in config['device']:\n self.send_push(key, title, message, config.get('url'), d, 'device_iden')\n elif config.get('email'):\n for e in config['email']:\n self.send_push(key, title, message, config.get('url'), e, 'email')\n else:\n self.send_push(key, title, message, config.get('url'))\n\n def send_push(self, api_key, title, body, url=None, destination=None, destination_type=None):\n push_type = 'link' if url else 'note'\n notification = {'type': push_type, 'title': title, 'body': body}\n if url:\n notification['url'] = url\n if destination:\n notification[destination_type] = destination\n\n # Make the request\n headers = {\n 'Authorization': b'Basic ' + base64.b64encode(api_key.encode('ascii')),\n 'Content-Type': 'application/json',\n 'Accept': 'application/json',\n 'User-Agent': 'Flexget',\n }\n try:\n response = requests.post(PUSHBULLET_URL, headers=headers, json=notification)\n except RequestException as e:\n if e.response is not None:\n if e.response.status_code == 429:\n reset_time = e.response.headers.get('X-Ratelimit-Reset')\n if reset_time:\n reset_time = datetime.datetime.fromtimestamp(int(reset_time)).strftime(\n '%Y-%m-%d %H:%M:%S'\n )\n message = f'Monthly Pushbullet database operations limit reached. Next reset: {reset_time}'\n else:\n message = e.response.json()['error']['message']\n else:\n message = str(e)\n raise PluginWarning(message)\n\n reset_time = datetime.datetime.fromtimestamp(\n int(response.headers['X-Ratelimit-Reset'])\n ).strftime('%Y-%m-%d %H:%M:%S')\n remaining = response.headers['X-Ratelimit-Remaining']\n log.debug(\n 'Pushbullet notification sent. Database operations remaining until next reset: %s. 
'\n 'Next reset at: %s',\n remaining,\n reset_time,\n )\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(PushbulletNotifier, plugin_name, api_ver=2, interfaces=['notifiers'])\n", "path": "flexget/components/notify/notifiers/pushbullet.py"}], "after_files": [{"content": "import base64\nimport datetime\nimport logging\n\nfrom requests.exceptions import RequestException\n\nfrom flexget import plugin\nfrom flexget.config_schema import one_or_more\nfrom flexget.event import event\nfrom flexget.plugin import PluginWarning\nfrom flexget.utils.requests import Session as RequestSession\nfrom flexget.utils.requests import TimedLimiter\n\nplugin_name = 'pushbullet'\nlog = logging.getLogger(plugin_name)\n\nPUSHBULLET_URL = 'https://api.pushbullet.com/v2/pushes'\n\nrequests = RequestSession(max_retries=3)\nrequests.add_domain_limiter(TimedLimiter('pushbullet.com', '5 seconds'))\n\n\nclass PushbulletNotifier:\n \"\"\"\n Example::\n\n notify:\n entries:\n via:\n pushbullet:\n apikey: <API_KEY>\n [device: <DEVICE_IDEN> (can also be a list of device ids, or don't specify any ids to send to all devices)]\n [email: <EMAIL_ADDRESS> (can also be a list of user email addresses)]\n [channel: <CHANNEL_TAG> (you can only specify device / email or channel tag, cannot use both)]\n\n Configuration parameters are also supported from entries (eg. through set).\n \"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'api_key': one_or_more({'type': 'string'}),\n 'device': one_or_more({'type': 'string'}),\n 'email': one_or_more({'type': 'string', 'format': 'email'}),\n 'url': {'type': 'string'},\n 'channel': {'type': 'string'},\n 'file_template': {'type': 'string'},\n },\n 'required': ['api_key'],\n 'oneOf': [\n {'required': ['device']},\n {'required': ['channel']},\n {'required': ['email']},\n {\n 'not': {\n 'anyOf': [\n {'required': ['device']},\n {'required': ['channel']},\n {'required': ['email']},\n ]\n }\n },\n ],\n 'error_oneOf': 'One (and only one) of `email`, `device` or `channel` are allowed.',\n 'additionalProperties': False,\n }\n\n def notify(self, title, message, config):\n \"\"\"\n Send a Pushbullet notification\n \"\"\"\n if config.get('device') and not isinstance(config['device'], list):\n config['device'] = [config['device']]\n\n if config.get('email') and not isinstance(config['email'], list):\n config['email'] = [config['email']]\n\n if not isinstance(config['api_key'], list):\n config['api_key'] = [config['api_key']]\n\n for key in config['api_key']:\n if config.get('channel'):\n self.send_push(\n key, title, message, config.get('url'), config.get('channel'), 'channel_tag'\n )\n elif config.get('device'):\n for d in config['device']:\n self.send_push(key, title, message, config.get('url'), d, 'device_iden')\n elif config.get('email'):\n for e in config['email']:\n self.send_push(key, title, message, config.get('url'), e, 'email')\n else:\n self.send_push(key, title, message, config.get('url'))\n\n @staticmethod\n def send_push(api_key, title, body, url=None, destination=None, destination_type=None):\n push_type = 'link' if url else 'note'\n notification = {'type': push_type, 'title': title, 'body': body}\n if url:\n notification['url'] = url\n if destination:\n notification[destination_type] = destination\n\n # Make the request\n headers = {\n 'Authorization': b'Basic ' + base64.b64encode(api_key.encode('ascii')),\n 'Content-Type': 'application/json',\n 'Accept': 'application/json',\n 'User-Agent': 'Flexget',\n }\n try:\n response = requests.post(PUSHBULLET_URL, 
headers=headers, json=notification)\n except RequestException as e:\n if e.response is not None:\n if e.response.status_code == 429:\n reset_time = e.response.headers.get('X-Ratelimit-Reset')\n if reset_time:\n reset_time = datetime.datetime.fromtimestamp(int(reset_time)).strftime(\n '%Y-%m-%d %H:%M:%S'\n )\n message = f'Monthly Pushbullet database operations limit reached. Next reset: {reset_time}'\n else:\n message = e.response.json()['error']['message']\n else:\n message = str(e)\n raise PluginWarning(message)\n\n reset_time = response.headers.get('X-Ratelimit-Reset')\n remaining = response.headers.get('X-Ratelimit-Remaining')\n if reset_time and remaining:\n reset_time = datetime.datetime.fromtimestamp(int(reset_time))\n log.debug(\n 'Pushbullet notification sent. Database operations remaining until next reset: %s. '\n 'Next reset at: %s',\n remaining,\n reset_time,\n )\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(PushbulletNotifier, plugin_name, api_ver=2, interfaces=['notifiers'])\n", "path": "flexget/components/notify/notifiers/pushbullet.py"}]} | 2,048 | 413 |
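The Flexget traceback in the row above comes from indexing `response.headers['X-Ratelimit-Reset']` when Pushbullet stopped sending that header. The patch switches to `headers.get(...)` and only logs when both values are present; a stand-alone sketch of that defensive pattern follows. A plain dict stands in for `response.headers` here (the real object is a case-insensitive mapping), so this is illustrative rather than the plugin's actual code path.

```python
import datetime
import logging

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("pushbullet-sketch")


def log_rate_limit(headers):
    """Log Pushbullet rate-limit info, tolerating missing headers."""
    reset = headers.get("X-Ratelimit-Reset")
    remaining = headers.get("X-Ratelimit-Remaining")
    if reset and remaining:
        reset_time = datetime.datetime.fromtimestamp(int(reset))
        log.debug(
            "Database operations remaining until next reset: %s. Next reset at: %s",
            remaining,
            reset_time,
        )
    else:
        # Missing headers are ignored instead of raising KeyError as before.
        log.debug("No rate-limit headers in this response.")


if __name__ == "__main__":
    log_rate_limit({"X-Ratelimit-Reset": "1700000000", "X-Ratelimit-Remaining": "320"})
    log_rate_limit({})  # previously this case crashed with KeyError: 'x-ratelimit-reset'
```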
gh_patches_debug_5182 | rasdani/github-patches | git_diff | Gallopsled__pwntools-1852 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`atexit.register` does not work
### What I did
```python3
from pwn import *
atexit.register(print, "hello world")
exit()
```
### What I expected to see
```python3 test.py
hello world
```
### What I saw
Nothing
I noticed this because `asm()`, which adds an `atexit` handler to remove the `/tmp/pwn-asm-XXXXXX` folder, does not in fact remove it, meaning multiple script runs lead to many similar folders.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwnlib/atexit.py`
Content:
```
1 """
2 Replacement for the Python standard library's atexit.py.
3
4 Whereas the standard :mod:`atexit` module only defines :func:`atexit.register`,
5 this replacement module also defines :func:`unregister`.
6
7 This module also fixes a the issue that exceptions raised by an exit handler is
8 printed twice when the standard :mod:`atexit` is used.
9 """
10 from __future__ import absolute_import
11 from __future__ import division
12
13 import sys
14 import threading
15 import traceback
16
17 from pwnlib.context import context
18
19 __all__ = ['register', 'unregister']
20
21 _lock = threading.Lock()
22 _ident = 0
23 _handlers = {}
24
25 def register(func, *args, **kwargs):
26 """register(func, *args, **kwargs)
27
28 Registers a function to be called on program termination. The function will
29 be called with positional arguments `args` and keyword arguments `kwargs`,
30 i.e. ``func(*args, **kwargs)``. The current `context` is recorded and will
31 be the one used when the handler is run.
32
33 E.g. to suppress logging output from an exit-handler one could write::
34
35 with context.local(log_level = 'error'):
36 atexit.register(handler)
37
38 An identifier is returned which can be used to unregister the exit-handler.
39
40 This function can be used as a decorator::
41
42 @atexit.register
43 def handler():
44 ...
45
46 Notice however that this will bind ``handler`` to the identifier and not the
47 actual exit-handler. The exit-handler can then be unregistered with::
48
49 atexit.unregister(handler)
50
51 This function is thread safe.
52
53 """
54 global _ident
55 with _lock:
56 ident = _ident
57 _ident += 1
58 _handlers[ident] = (func, args, kwargs, vars(context))
59 return ident
60
61 def unregister(ident):
62 """unregister(ident)
63
64 Remove the exit-handler identified by `ident` from the list of registered
65 handlers. If `ident` isn't registered this is a no-op.
66 """
67 if ident in _handlers:
68 del _handlers[ident]
69
70 def _run_handlers():
71 """_run_handlers()
72
73 Run registered exit-handlers. They run in the reverse order of which they
74 were registered.
75
76 If a handler raises an exception, it will be printed but nothing else
77 happens, i.e. other handlers will be run and `sys.excepthook` will not be
78 called for that reason.
79 """
80 context.clear()
81 for _ident, (func, args, kwargs, ctx) in \
82 sorted(_handlers.items(), reverse = True):
83 try:
84 with context.local(**ctx):
85 func(*args, **kwargs)
86 except SystemExit:
87 pass
88 except Exception:
89 # extract the current exception and rewind the traceback to where it
90 # originated
91 typ, val, tb = sys.exc_info()
92 traceback.print_exception(typ, val, tb.tb_next)
93
94 # if there's already an exitfunc registered be sure to run that too
95 if hasattr(sys, "exitfunc"):
96 register(sys.exitfunc)
97
98 sys.exitfunc = _run_handlers
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwnlib/atexit.py b/pwnlib/atexit.py
--- a/pwnlib/atexit.py
+++ b/pwnlib/atexit.py
@@ -13,6 +13,7 @@
import sys
import threading
import traceback
+import atexit as std_atexit
from pwnlib.context import context
@@ -95,4 +96,8 @@
if hasattr(sys, "exitfunc"):
register(sys.exitfunc)
-sys.exitfunc = _run_handlers
+if sys.version_info[0] < 3:
+ sys.exitfunc = _run_handlers
+else:
+ std_atexit.register(_run_handlers)
+
| {"golden_diff": "diff --git a/pwnlib/atexit.py b/pwnlib/atexit.py\n--- a/pwnlib/atexit.py\n+++ b/pwnlib/atexit.py\n@@ -13,6 +13,7 @@\n import sys\n import threading\n import traceback\n+import atexit as std_atexit\n \n from pwnlib.context import context\n \n@@ -95,4 +96,8 @@\n if hasattr(sys, \"exitfunc\"):\n register(sys.exitfunc)\n \n-sys.exitfunc = _run_handlers\n+if sys.version_info[0] < 3:\n+ sys.exitfunc = _run_handlers\n+else:\n+ std_atexit.register(_run_handlers)\n+\n", "issue": "`atexit.register` does not work\n### What I did\r\n```python3\r\nfrom pwn import *\r\natexit.register(print, \"hello world\")\r\nexit()\r\n```\r\n### What I expected to see\r\n```python3 test.py\r\nhello world\r\n```\r\n### What I saw\r\nNothing\r\n\r\nI noticed this because `asm()`, which adds an `atexit` handler to remove the `/tmp/pwn-asm-XXXXXX` folder, does not in fact remove it, meaning multiple script runs leads to many similar folders.\n", "before_files": [{"content": "\"\"\"\nReplacement for the Python standard library's atexit.py.\n\nWhereas the standard :mod:`atexit` module only defines :func:`atexit.register`,\nthis replacement module also defines :func:`unregister`.\n\nThis module also fixes a the issue that exceptions raised by an exit handler is\nprinted twice when the standard :mod:`atexit` is used.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport sys\nimport threading\nimport traceback\n\nfrom pwnlib.context import context\n\n__all__ = ['register', 'unregister']\n\n_lock = threading.Lock()\n_ident = 0\n_handlers = {}\n\ndef register(func, *args, **kwargs):\n \"\"\"register(func, *args, **kwargs)\n\n Registers a function to be called on program termination. The function will\n be called with positional arguments `args` and keyword arguments `kwargs`,\n i.e. ``func(*args, **kwargs)``. The current `context` is recorded and will\n be the one used when the handler is run.\n\n E.g. to suppress logging output from an exit-handler one could write::\n\n with context.local(log_level = 'error'):\n atexit.register(handler)\n\n An identifier is returned which can be used to unregister the exit-handler.\n\n This function can be used as a decorator::\n\n @atexit.register\n def handler():\n ...\n\n Notice however that this will bind ``handler`` to the identifier and not the\n actual exit-handler. The exit-handler can then be unregistered with::\n\n atexit.unregister(handler)\n\n This function is thread safe.\n\n \"\"\"\n global _ident\n with _lock:\n ident = _ident\n _ident += 1\n _handlers[ident] = (func, args, kwargs, vars(context))\n return ident\n\ndef unregister(ident):\n \"\"\"unregister(ident)\n\n Remove the exit-handler identified by `ident` from the list of registered\n handlers. If `ident` isn't registered this is a no-op.\n \"\"\"\n if ident in _handlers:\n del _handlers[ident]\n\ndef _run_handlers():\n \"\"\"_run_handlers()\n\n Run registered exit-handlers. They run in the reverse order of which they\n were registered.\n\n If a handler raises an exception, it will be printed but nothing else\n happens, i.e. 
other handlers will be run and `sys.excepthook` will not be\n called for that reason.\n \"\"\"\n context.clear()\n for _ident, (func, args, kwargs, ctx) in \\\n sorted(_handlers.items(), reverse = True):\n try:\n with context.local(**ctx):\n func(*args, **kwargs)\n except SystemExit:\n pass\n except Exception:\n # extract the current exception and rewind the traceback to where it\n # originated\n typ, val, tb = sys.exc_info()\n traceback.print_exception(typ, val, tb.tb_next)\n\n# if there's already an exitfunc registered be sure to run that too\nif hasattr(sys, \"exitfunc\"):\n register(sys.exitfunc)\n\nsys.exitfunc = _run_handlers\n", "path": "pwnlib/atexit.py"}], "after_files": [{"content": "\"\"\"\nReplacement for the Python standard library's atexit.py.\n\nWhereas the standard :mod:`atexit` module only defines :func:`atexit.register`,\nthis replacement module also defines :func:`unregister`.\n\nThis module also fixes a the issue that exceptions raised by an exit handler is\nprinted twice when the standard :mod:`atexit` is used.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport sys\nimport threading\nimport traceback\nimport atexit as std_atexit\n\nfrom pwnlib.context import context\n\n__all__ = ['register', 'unregister']\n\n_lock = threading.Lock()\n_ident = 0\n_handlers = {}\n\ndef register(func, *args, **kwargs):\n \"\"\"register(func, *args, **kwargs)\n\n Registers a function to be called on program termination. The function will\n be called with positional arguments `args` and keyword arguments `kwargs`,\n i.e. ``func(*args, **kwargs)``. The current `context` is recorded and will\n be the one used when the handler is run.\n\n E.g. to suppress logging output from an exit-handler one could write::\n\n with context.local(log_level = 'error'):\n atexit.register(handler)\n\n An identifier is returned which can be used to unregister the exit-handler.\n\n This function can be used as a decorator::\n\n @atexit.register\n def handler():\n ...\n\n Notice however that this will bind ``handler`` to the identifier and not the\n actual exit-handler. The exit-handler can then be unregistered with::\n\n atexit.unregister(handler)\n\n This function is thread safe.\n\n \"\"\"\n global _ident\n with _lock:\n ident = _ident\n _ident += 1\n _handlers[ident] = (func, args, kwargs, vars(context))\n return ident\n\ndef unregister(ident):\n \"\"\"unregister(ident)\n\n Remove the exit-handler identified by `ident` from the list of registered\n handlers. If `ident` isn't registered this is a no-op.\n \"\"\"\n if ident in _handlers:\n del _handlers[ident]\n\ndef _run_handlers():\n \"\"\"_run_handlers()\n\n Run registered exit-handlers. They run in the reverse order of which they\n were registered.\n\n If a handler raises an exception, it will be printed but nothing else\n happens, i.e. 
other handlers will be run and `sys.excepthook` will not be\n called for that reason.\n \"\"\"\n context.clear()\n for _ident, (func, args, kwargs, ctx) in \\\n sorted(_handlers.items(), reverse = True):\n try:\n with context.local(**ctx):\n func(*args, **kwargs)\n except SystemExit:\n pass\n except Exception:\n # extract the current exception and rewind the traceback to where it\n # originated\n typ, val, tb = sys.exc_info()\n traceback.print_exception(typ, val, tb.tb_next)\n\n# if there's already an exitfunc registered be sure to run that too\nif hasattr(sys, \"exitfunc\"):\n register(sys.exitfunc)\n\nif sys.version_info[0] < 3:\n sys.exitfunc = _run_handlers\nelse:\n std_atexit.register(_run_handlers)\n\n", "path": "pwnlib/atexit.py"}]} | 1,241 | 146 |
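The pwntools row reduces to one fact: `sys.exitfunc` existed only in Python 2, so assigning a replacement to it on Python 3 is a silent no-op and the registered handlers (such as the `/tmp/pwn-asm-XXXXXX` cleanup) never run. The patch routes the handler through the standard `atexit` module on Python 3. A minimal runnable sketch of that version switch, with a trivial function standing in for pwnlib's `_run_handlers`:

```python
import atexit as std_atexit
import sys


def _run_handlers():
    # Stand-in for pwnlib.atexit._run_handlers in this sketch.
    print("running registered exit handlers")


# Python 2 honoured sys.exitfunc at interpreter shutdown; Python 3 removed it,
# so the hand-off has to go through the standard atexit module instead.
if sys.version_info[0] < 3:
    sys.exitfunc = _run_handlers
else:
    std_atexit.register(_run_handlers)

print("script body done")  # the handler line prints after this, at exit
```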
gh_patches_debug_20723 | rasdani/github-patches | git_diff | streamlit__streamlit-1737 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
st.write throws value is null when string is too big
# Summary
calling `st.write` with a string that is too big will result in an error message on the front end with the message "value is null"
<img width="551" alt="Screen Shot 2019-10-16 at 3 57 57 PM" src="https://user-images.githubusercontent.com/934511/66950384-a17d0e00-f02e-11e9-87bb-1df7158794b5.png">
# Steps to reproduce
1. create a script with a variable that holds a string that weighs more than 50 MB
2. streamlit run yourscript.py
3. message will appear on the front end
## Expected behavior:
The error message should be clearer.
## Actual behavior:
The error message does not point to the string-length constraint of `st.write`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/streamlit/server/server_util.py`
Content:
```
1 # Copyright 2018-2020 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Server related utility functions"""
16
17 from typing import Callable, List, Optional, Union
18
19 from streamlit import config
20 from streamlit import net_util
21 from streamlit import type_util
22 from streamlit import url_util
23 from streamlit.ForwardMsgCache import populate_hash_if_needed
24
25 # Largest message that can be sent via the WebSocket connection.
26 # (Limit was picked arbitrarily)
27 # TODO: Break message in several chunks if too large.
28 MESSAGE_SIZE_LIMIT = 50 * 1e6 # 50MB
29
30
31 def is_cacheable_msg(msg):
32 """True if the given message qualifies for caching.
33
34 Parameters
35 ----------
36 msg : ForwardMsg
37
38 Returns
39 -------
40 bool
41 True if we should cache the message.
42
43 """
44 if msg.WhichOneof("type") in {"ref_hash", "initialize"}:
45 # Some message types never get cached
46 return False
47 return msg.ByteSize() >= config.get_option("global.minCachedMessageSize")
48
49
50 def serialize_forward_msg(msg):
51 """Serialize a ForwardMsg to send to a client.
52
53 If the message is too large, it will be converted to an exception message
54 instead.
55
56 Parameters
57 ----------
58 msg : ForwardMsg
59 The message to serialize
60
61 Returns
62 -------
63 str
64 The serialized byte string to send
65
66 """
67 populate_hash_if_needed(msg)
68 msg_str = msg.SerializeToString()
69
70 if len(msg_str) > MESSAGE_SIZE_LIMIT:
71 _convert_msg_to_exception_msg(msg, RuntimeError("Data too large"))
72 msg_str = msg.SerializeToString()
73
74 return msg_str
75
76
77 def _convert_msg_to_exception_msg(msg, e):
78 import streamlit.elements.exception_proto as exception_proto
79
80 delta_id = msg.metadata.delta_id
81 msg.Clear()
82 msg.metadata.delta_id = delta_id
83
84 exception_proto.marshall(msg.delta.new_element.exception, e)
85
86
87 def is_url_from_allowed_origins(url):
88 """Return True if URL is from allowed origins (for CORS purpose).
89
90 Allowed origins:
91 1. localhost
92 2. The internal and external IP addresses of the machine where this
93 function was called from.
94 3. The cloud storage domain configured in `s3.bucket`.
95
96 If `server.enableCORS` is False, this allows all origins.
97
98 Parameters
99 ----------
100 url : str
101 The URL to check
102
103 Returns
104 -------
105 bool
106 True if URL is accepted. False otherwise.
107
108 """
109 if not config.get_option("server.enableCORS"):
110 # Allow everything when CORS is disabled.
111 return True
112
113 hostname = url_util.get_hostname(url)
114
115 allowed_domains = [ # List[Union[str, Callable[[], Optional[str]]]]
116 # Check localhost first.
117 "localhost",
118 "0.0.0.0",
119 "127.0.0.1",
120 # Try to avoid making unecessary HTTP requests by checking if the user
121 # manually specified a server address.
122 _get_server_address_if_manually_set,
123 _get_s3_url_host_if_manually_set,
124 # Then try the options that depend on HTTP requests or opening sockets.
125 net_util.get_internal_ip,
126 net_util.get_external_ip,
127 lambda: config.get_option("s3.bucket"),
128 ]
129
130 for allowed_domain in allowed_domains:
131 if callable(allowed_domain):
132 allowed_domain = allowed_domain()
133
134 if allowed_domain is None:
135 continue
136
137 if hostname == allowed_domain:
138 return True
139
140 return False
141
142
143 def _get_server_address_if_manually_set() -> Optional[str]:
144 if config.is_manually_set("browser.serverAddress"):
145 return url_util.get_hostname(config.get_option("browser.serverAddress"))
146 return None
147
148
149 def _get_s3_url_host_if_manually_set() -> Optional[str]:
150 if config.is_manually_set("s3.url"):
151 return url_util.get_hostname(config.get_option("s3.url"))
152 return None
153
154
155 def make_url_path_regex(*path, **kwargs):
156 """Get a regex of the form ^/foo/bar/baz/?$ for a path (foo, bar, baz)."""
157 path = [x.strip("/") for x in path if x] # Filter out falsy components.
158 path_format = r"^/%s/?$" if kwargs.get("trailing_slash", True) else r"^/%s$"
159 return path_format % "/".join(path)
160
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/streamlit/server/server_util.py b/lib/streamlit/server/server_util.py
--- a/lib/streamlit/server/server_util.py
+++ b/lib/streamlit/server/server_util.py
@@ -68,22 +68,19 @@
msg_str = msg.SerializeToString()
if len(msg_str) > MESSAGE_SIZE_LIMIT:
- _convert_msg_to_exception_msg(msg, RuntimeError("Data too large"))
+ import streamlit.elements.exception_proto as exception_proto
+
+ error = RuntimeError(
+ f"Data of size {len(msg_str)/1e6:.1f}MB exceeds write limit of {MESSAGE_SIZE_LIMIT/1e6}MB"
+ )
+ # Overwrite the offending ForwardMsg.delta with an error to display.
+ # This assumes that the size limit wasn't exceeded due to metadata.
+ exception_proto.marshall(msg.delta.new_element.exception, error)
msg_str = msg.SerializeToString()
return msg_str
-def _convert_msg_to_exception_msg(msg, e):
- import streamlit.elements.exception_proto as exception_proto
-
- delta_id = msg.metadata.delta_id
- msg.Clear()
- msg.metadata.delta_id = delta_id
-
- exception_proto.marshall(msg.delta.new_element.exception, e)
-
-
def is_url_from_allowed_origins(url):
"""Return True if URL is from allowed origins (for CORS purpose).
| {"golden_diff": "diff --git a/lib/streamlit/server/server_util.py b/lib/streamlit/server/server_util.py\n--- a/lib/streamlit/server/server_util.py\n+++ b/lib/streamlit/server/server_util.py\n@@ -68,22 +68,19 @@\n msg_str = msg.SerializeToString()\n \n if len(msg_str) > MESSAGE_SIZE_LIMIT:\n- _convert_msg_to_exception_msg(msg, RuntimeError(\"Data too large\"))\n+ import streamlit.elements.exception_proto as exception_proto\n+\n+ error = RuntimeError(\n+ f\"Data of size {len(msg_str)/1e6:.1f}MB exceeds write limit of {MESSAGE_SIZE_LIMIT/1e6}MB\"\n+ )\n+ # Overwrite the offending ForwardMsg.delta with an error to display.\n+ # This assumes that the size limit wasn't exceeded due to metadata.\n+ exception_proto.marshall(msg.delta.new_element.exception, error)\n msg_str = msg.SerializeToString()\n \n return msg_str\n \n \n-def _convert_msg_to_exception_msg(msg, e):\n- import streamlit.elements.exception_proto as exception_proto\n-\n- delta_id = msg.metadata.delta_id\n- msg.Clear()\n- msg.metadata.delta_id = delta_id\n-\n- exception_proto.marshall(msg.delta.new_element.exception, e)\n-\n-\n def is_url_from_allowed_origins(url):\n \"\"\"Return True if URL is from allowed origins (for CORS purpose).\n", "issue": "st.write throws value is null when string is too big\n# Summary\r\ncalling `st.write` with a string that is too big will result on an error message on the front end with message \"value is null\"\r\n\r\n<img width=\"551\" alt=\"Screen Shot 2019-10-16 at 3 57 57 PM\" src=\"https://user-images.githubusercontent.com/934511/66950384-a17d0e00-f02e-11e9-87bb-1df7158794b5.png\">\r\n\r\n# Steps to reproduce\r\n1. create a script with a variable that holds a string that's more than weights more than 50mb\r\n2. streamlit run yourscript.py\r\n3. message will appear on the front end\r\n\r\n\r\n## Expected behavior:\r\nThe error message should be clearer.\r\n\r\n## Actual behavior:\r\nError message does not point to the string length constrain of `st.write`.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Server related utility functions\"\"\"\n\nfrom typing import Callable, List, Optional, Union\n\nfrom streamlit import config\nfrom streamlit import net_util\nfrom streamlit import type_util\nfrom streamlit import url_util\nfrom streamlit.ForwardMsgCache import populate_hash_if_needed\n\n# Largest message that can be sent via the WebSocket connection.\n# (Limit was picked arbitrarily)\n# TODO: Break message in several chunks if too large.\nMESSAGE_SIZE_LIMIT = 50 * 1e6 # 50MB\n\n\ndef is_cacheable_msg(msg):\n \"\"\"True if the given message qualifies for caching.\n\n Parameters\n ----------\n msg : ForwardMsg\n\n Returns\n -------\n bool\n True if we should cache the message.\n\n \"\"\"\n if msg.WhichOneof(\"type\") in {\"ref_hash\", \"initialize\"}:\n # Some message types never get cached\n return False\n return msg.ByteSize() >= config.get_option(\"global.minCachedMessageSize\")\n\n\ndef 
serialize_forward_msg(msg):\n \"\"\"Serialize a ForwardMsg to send to a client.\n\n If the message is too large, it will be converted to an exception message\n instead.\n\n Parameters\n ----------\n msg : ForwardMsg\n The message to serialize\n\n Returns\n -------\n str\n The serialized byte string to send\n\n \"\"\"\n populate_hash_if_needed(msg)\n msg_str = msg.SerializeToString()\n\n if len(msg_str) > MESSAGE_SIZE_LIMIT:\n _convert_msg_to_exception_msg(msg, RuntimeError(\"Data too large\"))\n msg_str = msg.SerializeToString()\n\n return msg_str\n\n\ndef _convert_msg_to_exception_msg(msg, e):\n import streamlit.elements.exception_proto as exception_proto\n\n delta_id = msg.metadata.delta_id\n msg.Clear()\n msg.metadata.delta_id = delta_id\n\n exception_proto.marshall(msg.delta.new_element.exception, e)\n\n\ndef is_url_from_allowed_origins(url):\n \"\"\"Return True if URL is from allowed origins (for CORS purpose).\n\n Allowed origins:\n 1. localhost\n 2. The internal and external IP addresses of the machine where this\n function was called from.\n 3. The cloud storage domain configured in `s3.bucket`.\n\n If `server.enableCORS` is False, this allows all origins.\n\n Parameters\n ----------\n url : str\n The URL to check\n\n Returns\n -------\n bool\n True if URL is accepted. False otherwise.\n\n \"\"\"\n if not config.get_option(\"server.enableCORS\"):\n # Allow everything when CORS is disabled.\n return True\n\n hostname = url_util.get_hostname(url)\n\n allowed_domains = [ # List[Union[str, Callable[[], Optional[str]]]]\n # Check localhost first.\n \"localhost\",\n \"0.0.0.0\",\n \"127.0.0.1\",\n # Try to avoid making unecessary HTTP requests by checking if the user\n # manually specified a server address.\n _get_server_address_if_manually_set,\n _get_s3_url_host_if_manually_set,\n # Then try the options that depend on HTTP requests or opening sockets.\n net_util.get_internal_ip,\n net_util.get_external_ip,\n lambda: config.get_option(\"s3.bucket\"),\n ]\n\n for allowed_domain in allowed_domains:\n if callable(allowed_domain):\n allowed_domain = allowed_domain()\n\n if allowed_domain is None:\n continue\n\n if hostname == allowed_domain:\n return True\n\n return False\n\n\ndef _get_server_address_if_manually_set() -> Optional[str]:\n if config.is_manually_set(\"browser.serverAddress\"):\n return url_util.get_hostname(config.get_option(\"browser.serverAddress\"))\n return None\n\n\ndef _get_s3_url_host_if_manually_set() -> Optional[str]:\n if config.is_manually_set(\"s3.url\"):\n return url_util.get_hostname(config.get_option(\"s3.url\"))\n return None\n\n\ndef make_url_path_regex(*path, **kwargs):\n \"\"\"Get a regex of the form ^/foo/bar/baz/?$ for a path (foo, bar, baz).\"\"\"\n path = [x.strip(\"/\") for x in path if x] # Filter out falsy components.\n path_format = r\"^/%s/?$\" if kwargs.get(\"trailing_slash\", True) else r\"^/%s$\"\n return path_format % \"/\".join(path)\n", "path": "lib/streamlit/server/server_util.py"}], "after_files": [{"content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language 
governing permissions and\n# limitations under the License.\n\n\"\"\"Server related utility functions\"\"\"\n\nfrom typing import Callable, List, Optional, Union\n\nfrom streamlit import config\nfrom streamlit import net_util\nfrom streamlit import type_util\nfrom streamlit import url_util\nfrom streamlit.ForwardMsgCache import populate_hash_if_needed\n\n# Largest message that can be sent via the WebSocket connection.\n# (Limit was picked arbitrarily)\n# TODO: Break message in several chunks if too large.\nMESSAGE_SIZE_LIMIT = 50 * 1e6 # 50MB\n\n\ndef is_cacheable_msg(msg):\n \"\"\"True if the given message qualifies for caching.\n\n Parameters\n ----------\n msg : ForwardMsg\n\n Returns\n -------\n bool\n True if we should cache the message.\n\n \"\"\"\n if msg.WhichOneof(\"type\") in {\"ref_hash\", \"initialize\"}:\n # Some message types never get cached\n return False\n return msg.ByteSize() >= config.get_option(\"global.minCachedMessageSize\")\n\n\ndef serialize_forward_msg(msg):\n \"\"\"Serialize a ForwardMsg to send to a client.\n\n If the message is too large, it will be converted to an exception message\n instead.\n\n Parameters\n ----------\n msg : ForwardMsg\n The message to serialize\n\n Returns\n -------\n str\n The serialized byte string to send\n\n \"\"\"\n populate_hash_if_needed(msg)\n msg_str = msg.SerializeToString()\n\n if len(msg_str) > MESSAGE_SIZE_LIMIT:\n import streamlit.elements.exception_proto as exception_proto\n\n error = RuntimeError(\n f\"Data of size {len(msg_str)/1e6:.1f}MB exceeds write limit of {MESSAGE_SIZE_LIMIT/1e6}MB\"\n )\n # Overwrite the offending ForwardMsg.delta with an error to display.\n # This assumes that the size limit wasn't exceeded due to metadata.\n exception_proto.marshall(msg.delta.new_element.exception, error)\n msg_str = msg.SerializeToString()\n\n return msg_str\n\n\ndef is_url_from_allowed_origins(url):\n \"\"\"Return True if URL is from allowed origins (for CORS purpose).\n\n Allowed origins:\n 1. localhost\n 2. The internal and external IP addresses of the machine where this\n function was called from.\n 3. The cloud storage domain configured in `s3.bucket`.\n\n If `server.enableCORS` is False, this allows all origins.\n\n Parameters\n ----------\n url : str\n The URL to check\n\n Returns\n -------\n bool\n True if URL is accepted. 
False otherwise.\n\n \"\"\"\n if not config.get_option(\"server.enableCORS\"):\n # Allow everything when CORS is disabled.\n return True\n\n hostname = url_util.get_hostname(url)\n\n allowed_domains = [ # List[Union[str, Callable[[], Optional[str]]]]\n # Check localhost first.\n \"localhost\",\n \"0.0.0.0\",\n \"127.0.0.1\",\n # Try to avoid making unecessary HTTP requests by checking if the user\n # manually specified a server address.\n _get_server_address_if_manually_set,\n _get_s3_url_host_if_manually_set,\n # Then try the options that depend on HTTP requests or opening sockets.\n net_util.get_internal_ip,\n net_util.get_external_ip,\n lambda: config.get_option(\"s3.bucket\"),\n ]\n\n for allowed_domain in allowed_domains:\n if callable(allowed_domain):\n allowed_domain = allowed_domain()\n\n if allowed_domain is None:\n continue\n\n if hostname == allowed_domain:\n return True\n\n return False\n\n\ndef _get_server_address_if_manually_set() -> Optional[str]:\n if config.is_manually_set(\"browser.serverAddress\"):\n return url_util.get_hostname(config.get_option(\"browser.serverAddress\"))\n return None\n\n\ndef _get_s3_url_host_if_manually_set() -> Optional[str]:\n if config.is_manually_set(\"s3.url\"):\n return url_util.get_hostname(config.get_option(\"s3.url\"))\n return None\n\n\ndef make_url_path_regex(*path, **kwargs):\n \"\"\"Get a regex of the form ^/foo/bar/baz/?$ for a path (foo, bar, baz).\"\"\"\n path = [x.strip(\"/\") for x in path if x] # Filter out falsy components.\n path_format = r\"^/%s/?$\" if kwargs.get(\"trailing_slash\", True) else r\"^/%s$\"\n return path_format % \"/\".join(path)\n", "path": "lib/streamlit/server/server_util.py"}]} | 1,945 | 298 |
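The streamlit row above (its patch and files are JSON-escaped in the trailing columns) changes `serialize_forward_msg` so that a ForwardMsg larger than `MESSAGE_SIZE_LIMIT` is rewritten into an exception element carrying a readable size message, instead of reaching the frontend as a null value. Below is a minimal, stdlib-only sketch of that guard pattern; the `Message` class, `serialize_with_limit`, and the JSON payload are illustrative stand-ins for streamlit's protobuf types, not its actual API.

```python
import json

MESSAGE_SIZE_LIMIT = 50 * 1_000_000  # 50 MB, mirroring the limit used in the patch


class Message:
    """Illustrative stand-in for a protobuf ForwardMsg (assumption, not streamlit code)."""

    def __init__(self, payload):
        self.payload = payload

    def serialize(self) -> bytes:
        return json.dumps(self.payload).encode()


def serialize_with_limit(msg: Message) -> bytes:
    """Serialize msg; if the result is too large, replace the payload with an error."""
    data = msg.serialize()
    if len(data) > MESSAGE_SIZE_LIMIT:
        # The real patch marshalls a RuntimeError into msg.delta.new_element.exception,
        # so the frontend renders an error in place of the oversized element.
        msg.payload = {
            "error": f"Data of size {len(data) / 1e6:.1f}MB exceeds write limit of "
                     f"{MESSAGE_SIZE_LIMIT / 1e6}MB"
        }
        data = msg.serialize()
    return data
```

The design choice worth noting in the real diff is that only the delta payload is overwritten, so the message keeps its metadata and the error is displayed on the element that caused it.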
gh_patches_debug_12795 | rasdani/github-patches | git_diff | pypa__setuptools-2381 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pip install -e fails on version 50: command class <class 'setuptools.command.egg_info.egg_info'> must subclass Command
```
> pip install -e .
ERROR: Command errored out with exit status 1:
command: /Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/bin/python3.6 -c 'import sys, setuptools, tokenize; sys.argv[0] = '"'"'/Users/jaykarimi/Documents/vanir/setup.py'"'"'; __file__='"'"'/Users/jaykarimi/Documents/vanir/setup.py'"'"';f=getattr(tokenize, '"'"'open'"'"', open)(__file__);code=f.read().replace('"'"'\r\n'"'"', '"'"'\n'"'"');f.close();exec(compile(code, __file__, '"'"'exec'"'"'))' egg_info
cwd: /Users/jaykarimi/Documents/vanir/
Complete output (19 lines):
/Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/_distutils_hack/__init__.py:30: UserWarning: Setuptools is replacing distutils.
warnings.warn("Setuptools is replacing distutils.")
/Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/setuptools/dist.py:452: UserWarning: Normalizing 'v1.7.1' to '1.7.1'
warnings.warn(tmpl.format(**locals()))
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/Users/jaykarimi/Documents/vanir/setup.py", line 127, in <module>
setup(**_conf)
File "/Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/setuptools/__init__.py", line 153, in setup
return distutils.core.setup(**attrs)
File "/Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/setuptools/_distutils/core.py", line 134, in setup
ok = dist.parse_command_line()
File "/Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/setuptools/_distutils/dist.py", line 484, in parse_command_line
args = self._parse_command_opts(parser, args)
File "/Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/setuptools/dist.py", line 903, in _parse_command_opts
nargs = _Distribution._parse_command_opts(self, parser, args)
File "/Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/setuptools/_distutils/dist.py", line 548, in _parse_command_opts
"command class %s must subclass Command" % cmd_class)
distutils.errors.DistutilsClassError: command class <class 'setuptools.command.egg_info.egg_info'> must subclass Command
----------------------------------------
ERROR: Command errored out with exit status 1: python setup.py egg_info Check the logs for full command output.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `_distutils_hack/__init__.py`
Content:
```
1 import sys
2 import os
3 import re
4 import importlib
5 import warnings
6
7
8 is_pypy = '__pypy__' in sys.builtin_module_names
9
10
11 def warn_distutils_present():
12 if 'distutils' not in sys.modules:
13 return
14 if is_pypy and sys.version_info < (3, 7):
15 # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
16 # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
17 return
18 warnings.warn(
19 "Distutils was imported before Setuptools, but importing Setuptools "
20 "also replaces the `distutils` module in `sys.modules`. This may lead "
21 "to undesirable behaviors or errors. To avoid these issues, avoid "
22 "using distutils directly, ensure that setuptools is installed in the "
23 "traditional way (e.g. not an editable install), and/or make sure "
24 "that setuptools is always imported before distutils.")
25
26
27 def clear_distutils():
28 if 'distutils' not in sys.modules:
29 return
30 warnings.warn("Setuptools is replacing distutils.")
31 mods = [name for name in sys.modules if re.match(r'distutils\b', name)]
32 for name in mods:
33 del sys.modules[name]
34
35
36 def enabled():
37 """
38 Allow selection of distutils by environment variable.
39 """
40 which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')
41 return which == 'local'
42
43
44 def ensure_local_distutils():
45 clear_distutils()
46 distutils = importlib.import_module('setuptools._distutils')
47 distutils.__name__ = 'distutils'
48 sys.modules['distutils'] = distutils
49
50 # sanity check that submodules load as expected
51 core = importlib.import_module('distutils.core')
52 assert '_distutils' in core.__file__, core.__file__
53
54
55 def do_override():
56 """
57 Ensure that the local copy of distutils is preferred over stdlib.
58
59 See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
60 for more motivation.
61 """
62 if enabled():
63 warn_distutils_present()
64 ensure_local_distutils()
65
66
67 class DistutilsMetaFinder:
68 def find_spec(self, fullname, path, target=None):
69 if path is not None:
70 return
71
72 method_name = 'spec_for_{fullname}'.format(**locals())
73 method = getattr(self, method_name, lambda: None)
74 return method()
75
76 def spec_for_distutils(self):
77 import importlib.abc
78 import importlib.util
79
80 class DistutilsLoader(importlib.abc.Loader):
81
82 def create_module(self, spec):
83 return importlib.import_module('setuptools._distutils')
84
85 def exec_module(self, module):
86 pass
87
88 return importlib.util.spec_from_loader('distutils', DistutilsLoader())
89
90 def spec_for_pip(self):
91 """
92 Ensure stdlib distutils when running under pip.
93 See pypa/pip#8761 for rationale.
94 """
95 clear_distutils()
96 self.spec_for_distutils = lambda: None
97
98
99 DISTUTILS_FINDER = DistutilsMetaFinder()
100
101
102 def add_shim():
103 sys.meta_path.insert(0, DISTUTILS_FINDER)
104
105
106 def remove_shim():
107 try:
108 sys.meta_path.remove(DISTUTILS_FINDER)
109 except ValueError:
110 pass
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py
--- a/_distutils_hack/__init__.py
+++ b/_distutils_hack/__init__.py
@@ -92,9 +92,22 @@
Ensure stdlib distutils when running under pip.
See pypa/pip#8761 for rationale.
"""
+ if self.pip_imported_during_build():
+ return
clear_distutils()
self.spec_for_distutils = lambda: None
+ @staticmethod
+ def pip_imported_during_build():
+ """
+ Detect if pip is being imported in a build script. Ref #2355.
+ """
+ import traceback
+ return any(
+ frame.f_globals['__file__'].endswith('setup.py')
+ for frame, line in traceback.walk_stack(None)
+ )
+
DISTUTILS_FINDER = DistutilsMetaFinder()
| {"golden_diff": "diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py\n--- a/_distutils_hack/__init__.py\n+++ b/_distutils_hack/__init__.py\n@@ -92,9 +92,22 @@\n Ensure stdlib distutils when running under pip.\n See pypa/pip#8761 for rationale.\n \"\"\"\n+ if self.pip_imported_during_build():\n+ return\n clear_distutils()\n self.spec_for_distutils = lambda: None\n \n+ @staticmethod\n+ def pip_imported_during_build():\n+ \"\"\"\n+ Detect if pip is being imported in a build script. Ref #2355.\n+ \"\"\"\n+ import traceback\n+ return any(\n+ frame.f_globals['__file__'].endswith('setup.py')\n+ for frame, line in traceback.walk_stack(None)\n+ )\n+\n \n DISTUTILS_FINDER = DistutilsMetaFinder()\n", "issue": "pip install -e fails on version 50: command class <class 'setuptools.command.egg_info.egg_info'> must subclass Command\n```\r\n> pip install -e .\r\n ERROR: Command errored out with exit status 1:\r\n command: /Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/bin/python3.6 -c 'import sys, setuptools, tokenize; sys.argv[0] = '\"'\"'/Users/jaykarimi/Documents/vanir/setup.py'\"'\"'; __file__='\"'\"'/Users/jaykarimi/Documents/vanir/setup.py'\"'\"';f=getattr(tokenize, '\"'\"'open'\"'\"', open)(__file__);code=f.read().replace('\"'\"'\\r\\n'\"'\"', '\"'\"'\\n'\"'\"');f.close();exec(compile(code, __file__, '\"'\"'exec'\"'\"'))' egg_info\r\n cwd: /Users/jaykarimi/Documents/vanir/\r\n Complete output (19 lines):\r\n /Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/_distutils_hack/__init__.py:30: UserWarning: Setuptools is replacing distutils.\r\n warnings.warn(\"Setuptools is replacing distutils.\")\r\n /Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/setuptools/dist.py:452: UserWarning: Normalizing 'v1.7.1' to '1.7.1'\r\n warnings.warn(tmpl.format(**locals()))\r\n Traceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/Users/jaykarimi/Documents/vanir/setup.py\", line 127, in <module>\r\n setup(**_conf)\r\n File \"/Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/setuptools/__init__.py\", line 153, in setup\r\n return distutils.core.setup(**attrs)\r\n File \"/Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/setuptools/_distutils/core.py\", line 134, in setup\r\n ok = dist.parse_command_line()\r\n File \"/Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/setuptools/_distutils/dist.py\", line 484, in parse_command_line\r\n args = self._parse_command_opts(parser, args)\r\n File \"/Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/setuptools/dist.py\", line 903, in _parse_command_opts\r\n nargs = _Distribution._parse_command_opts(self, parser, args)\r\n File \"/Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/setuptools/_distutils/dist.py\", line 548, in _parse_command_opts\r\n \"command class %s must subclass Command\" % cmd_class)\r\n distutils.errors.DistutilsClassError: command class <class 'setuptools.command.egg_info.egg_info'> must subclass Command\r\n ----------------------------------------\r\nERROR: Command errored out with exit status 1: python setup.py egg_info Check the logs for full command output.\r\n```\n", "before_files": [{"content": "import sys\nimport os\nimport re\nimport importlib\nimport warnings\n\n\nis_pypy = '__pypy__' in sys.builtin_module_names\n\n\ndef warn_distutils_present():\n if 'distutils' not in 
sys.modules:\n return\n if is_pypy and sys.version_info < (3, 7):\n # PyPy for 3.6 unconditionally imports distutils, so bypass the warning\n # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250\n return\n warnings.warn(\n \"Distutils was imported before Setuptools, but importing Setuptools \"\n \"also replaces the `distutils` module in `sys.modules`. This may lead \"\n \"to undesirable behaviors or errors. To avoid these issues, avoid \"\n \"using distutils directly, ensure that setuptools is installed in the \"\n \"traditional way (e.g. not an editable install), and/or make sure \"\n \"that setuptools is always imported before distutils.\")\n\n\ndef clear_distutils():\n if 'distutils' not in sys.modules:\n return\n warnings.warn(\"Setuptools is replacing distutils.\")\n mods = [name for name in sys.modules if re.match(r'distutils\\b', name)]\n for name in mods:\n del sys.modules[name]\n\n\ndef enabled():\n \"\"\"\n Allow selection of distutils by environment variable.\n \"\"\"\n which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')\n return which == 'local'\n\n\ndef ensure_local_distutils():\n clear_distutils()\n distutils = importlib.import_module('setuptools._distutils')\n distutils.__name__ = 'distutils'\n sys.modules['distutils'] = distutils\n\n # sanity check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n\n\ndef do_override():\n \"\"\"\n Ensure that the local copy of distutils is preferred over stdlib.\n\n See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401\n for more motivation.\n \"\"\"\n if enabled():\n warn_distutils_present()\n ensure_local_distutils()\n\n\nclass DistutilsMetaFinder:\n def find_spec(self, fullname, path, target=None):\n if path is not None:\n return\n\n method_name = 'spec_for_{fullname}'.format(**locals())\n method = getattr(self, method_name, lambda: None)\n return method()\n\n def spec_for_distutils(self):\n import importlib.abc\n import importlib.util\n\n class DistutilsLoader(importlib.abc.Loader):\n\n def create_module(self, spec):\n return importlib.import_module('setuptools._distutils')\n\n def exec_module(self, module):\n pass\n\n return importlib.util.spec_from_loader('distutils', DistutilsLoader())\n\n def spec_for_pip(self):\n \"\"\"\n Ensure stdlib distutils when running under pip.\n See pypa/pip#8761 for rationale.\n \"\"\"\n clear_distutils()\n self.spec_for_distutils = lambda: None\n\n\nDISTUTILS_FINDER = DistutilsMetaFinder()\n\n\ndef add_shim():\n sys.meta_path.insert(0, DISTUTILS_FINDER)\n\n\ndef remove_shim():\n try:\n sys.meta_path.remove(DISTUTILS_FINDER)\n except ValueError:\n pass\n", "path": "_distutils_hack/__init__.py"}], "after_files": [{"content": "import sys\nimport os\nimport re\nimport importlib\nimport warnings\n\n\nis_pypy = '__pypy__' in sys.builtin_module_names\n\n\ndef warn_distutils_present():\n if 'distutils' not in sys.modules:\n return\n if is_pypy and sys.version_info < (3, 7):\n # PyPy for 3.6 unconditionally imports distutils, so bypass the warning\n # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250\n return\n warnings.warn(\n \"Distutils was imported before Setuptools, but importing Setuptools \"\n \"also replaces the `distutils` module in `sys.modules`. This may lead \"\n \"to undesirable behaviors or errors. 
To avoid these issues, avoid \"\n \"using distutils directly, ensure that setuptools is installed in the \"\n \"traditional way (e.g. not an editable install), and/or make sure \"\n \"that setuptools is always imported before distutils.\")\n\n\ndef clear_distutils():\n if 'distutils' not in sys.modules:\n return\n warnings.warn(\"Setuptools is replacing distutils.\")\n mods = [name for name in sys.modules if re.match(r'distutils\\b', name)]\n for name in mods:\n del sys.modules[name]\n\n\ndef enabled():\n \"\"\"\n Allow selection of distutils by environment variable.\n \"\"\"\n which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')\n return which == 'local'\n\n\ndef ensure_local_distutils():\n clear_distutils()\n distutils = importlib.import_module('setuptools._distutils')\n distutils.__name__ = 'distutils'\n sys.modules['distutils'] = distutils\n\n # sanity check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n\n\ndef do_override():\n \"\"\"\n Ensure that the local copy of distutils is preferred over stdlib.\n\n See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401\n for more motivation.\n \"\"\"\n if enabled():\n warn_distutils_present()\n ensure_local_distutils()\n\n\nclass DistutilsMetaFinder:\n def find_spec(self, fullname, path, target=None):\n if path is not None:\n return\n\n method_name = 'spec_for_{fullname}'.format(**locals())\n method = getattr(self, method_name, lambda: None)\n return method()\n\n def spec_for_distutils(self):\n import importlib.abc\n import importlib.util\n\n class DistutilsLoader(importlib.abc.Loader):\n\n def create_module(self, spec):\n return importlib.import_module('setuptools._distutils')\n\n def exec_module(self, module):\n pass\n\n return importlib.util.spec_from_loader('distutils', DistutilsLoader())\n\n def spec_for_pip(self):\n \"\"\"\n Ensure stdlib distutils when running under pip.\n See pypa/pip#8761 for rationale.\n \"\"\"\n if self.pip_imported_during_build():\n return\n clear_distutils()\n self.spec_for_distutils = lambda: None\n\n @staticmethod\n def pip_imported_during_build():\n \"\"\"\n Detect if pip is being imported in a build script. Ref #2355.\n \"\"\"\n import traceback\n return any(\n frame.f_globals['__file__'].endswith('setup.py')\n for frame, line in traceback.walk_stack(None)\n )\n\n\nDISTUTILS_FINDER = DistutilsMetaFinder()\n\n\ndef add_shim():\n sys.meta_path.insert(0, DISTUTILS_FINDER)\n\n\ndef remove_shim():\n try:\n sys.meta_path.remove(DISTUTILS_FINDER)\n except ValueError:\n pass\n", "path": "_distutils_hack/__init__.py"}]} | 2,052 | 215 |
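The setuptools patch above skips the stdlib-`distutils` fallback when pip is merely being imported from inside a `setup.py` build, and it detects that case by walking the interpreter call stack with `traceback.walk_stack`. Here is a small self-contained sketch of that check; the function name `running_under_setup_py` and the defensive `.get()` lookup are additions made for illustration, not part of the setuptools code.

```python
import traceback


def running_under_setup_py() -> bool:
    """Return True if any frame on the current call stack comes from a setup.py.

    Mirrors DistutilsMetaFinder.pip_imported_during_build() from the patch above;
    the .get() guard (an assumption/addition) covers frames without __file__,
    such as code typed at the REPL.
    """
    return any(
        frame.f_globals.get("__file__", "").endswith("setup.py")
        for frame, _lineno in traceback.walk_stack(None)
    )


if __name__ == "__main__":
    # Run as a plain script this prints False; called while a setup.py frame
    # is on the stack, it returns True.
    print(running_under_setup_py())
```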
gh_patches_debug_16921 | rasdani/github-patches | git_diff | tinygrad__tinygrad-667 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Assertion error running deep_deterministic_policy_gradient.py example
- Python 3.9.2
- osx M1
` python3 deep_deterministic_policy_gradient.py`
```
cuda backend not available No module named 'pycuda'
gpu backend not available No module named 'pyopencl'
metal backend not available No module named 'Metal'
Traceback (most recent call last):
File "tinygrad/examples/deep_deterministic_policy_gradient.py", line 229, in <module>
agent = DeepDeterministicPolicyGradient(env)
File "tinygrad/examples/deep_deterministic_policy_gradient.py", line 166, in __init__
self.update_network_parameters(tau=1.0)
File "tinygrad/examples/deep_deterministic_policy_gradient.py", line 176, in update_network_parameters
target_param.assign(param * tau + target_param * (1.0 - tau))
File "tinygrad/tinygrad/tensor.py", line 83, in assign
assert not x.requires_grad # self requires_grad is okay?
AssertionError
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/deep_deterministic_policy_gradient.py`
Content:
```
1 from typing import Optional, Tuple
2 from numpy.typing import NDArray
3
4 from tinygrad.tensor import Tensor
5 from tinygrad.nn import optim
6 from tinygrad.helpers import getenv
7
8 import numpy as np
9 import gym
10
11
12 DEVICE = "GPU" if getenv("GPU") else "CPU"
13
14
15 class Actor:
16 def __init__(self, num_actions: int, num_states: int, hidden_size: Tuple[int, int] = (400, 300)):
17 self.l1 = Tensor.glorot_uniform(num_states, hidden_size[0])
18 self.l2 = Tensor.glorot_uniform(hidden_size[0], hidden_size[1])
19 self.mu = Tensor.glorot_uniform(hidden_size[1], num_actions)
20
21 def forward(self, state: Tensor, upper_bound: float) -> Tensor:
22 out = state.dot(self.l1).relu()
23 out = out.dot(self.l2).relu()
24 out = out.dot(self.mu).tanh()
25 output = out * upper_bound
26
27 return output
28
29
30 class Critic:
31 def __init__(self, num_inputs: int, hidden_size: Tuple[int, int] = (400, 300)):
32 self.l1 = Tensor.glorot_uniform(num_inputs, hidden_size[0])
33 self.l2 = Tensor.glorot_uniform(hidden_size[0], hidden_size[1])
34 self.q = Tensor.glorot_uniform(hidden_size[1], 1)
35
36 def forward(self, state: Tensor, action: Tensor) -> Tensor:
37 inputs = state.cat(action, dim=1)
38 out = inputs.dot(self.l1).relu()
39 out = out.dot(self.l2).relu()
40 q = out.dot(self.q)
41
42 return q
43
44
45 class Buffer:
46 def __init__(self, num_actions: int, num_states: int, buffer_capacity: int = 100000, batch_size: int = 64):
47 self.buffer_capacity = buffer_capacity
48 self.batch_size = batch_size
49
50 self.buffer_counter = 0
51
52 self.state_buffer = np.zeros((self.buffer_capacity, num_states))
53 self.action_buffer = np.zeros((self.buffer_capacity, num_actions))
54 self.reward_buffer = np.zeros((self.buffer_capacity, 1))
55 self.next_state_buffer = np.zeros((self.buffer_capacity, num_states))
56 self.done_buffer = np.zeros((self.buffer_capacity, 1))
57
58 def record(
59 self, observations: Tuple[Tensor, NDArray, float, NDArray, bool]
60 ) -> None:
61 index = self.buffer_counter % self.buffer_capacity
62
63 self.state_buffer[index] = observations[0].detach().numpy()
64 self.action_buffer[index] = observations[1]
65 self.reward_buffer[index] = observations[2]
66 self.next_state_buffer[index] = observations[3]
67 self.done_buffer[index] = observations[4]
68
69 self.buffer_counter += 1
70
71 def sample(self) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
72 record_range = min(self.buffer_counter, self.buffer_capacity)
73 batch_indices = np.random.choice(record_range, self.batch_size)
74
75 state_batch = Tensor(self.state_buffer[batch_indices], device=DEVICE, requires_grad=False)
76 action_batch = Tensor(self.action_buffer[batch_indices], device=DEVICE, requires_grad=False)
77 reward_batch = Tensor(self.reward_buffer[batch_indices], device=DEVICE, requires_grad=False)
78 next_state_batch = Tensor(self.next_state_buffer[batch_indices], device=DEVICE, requires_grad=False)
79 done_batch = Tensor(self.done_buffer[batch_indices], device=DEVICE, requires_grad=False)
80
81 return state_batch, action_batch, reward_batch, next_state_batch, done_batch
82
83
84 class GaussianActionNoise:
85 def __init__(self, mean: NDArray, std_deviation: NDArray):
86 self.mean = mean
87 self.std_dev = std_deviation
88
89 def __call__(self) -> Tensor:
90 return Tensor(
91 np.random.default_rng()
92 .normal(self.mean, self.std_dev, size=self.mean.shape)
93 .astype(np.float32),
94 device=DEVICE,
95 requires_grad=False,
96 )
97
98
99 class DeepDeterministicPolicyGradient:
100 """Deep Deterministic Policy Gradient (DDPG).
101
102 https://arxiv.org/pdf/1509.02971.pdf
103
104 Args:
105 env: The environment to learn from.
106 lr_actor: The learning rate of the actor.
107 lr_critic: The learning rate of the critic.
108 gamma: The discount factor.
109 buffer_capacity: The size of the replay buffer.
110 tau: The soft update coefficient.
111 hidden_size: The number of neurons in the hidden layers of the actor and critic networks.
112 batch_size: The minibatch size for each gradient update.
113 noise_stddev: The standard deviation of the exploration noise.
114
115 Note:
116 In contrast to the original paper, actions are already included in the first layer
117 of the Critic and we use a Gaussian distribution instead of an Ornstein Uhlenbeck
118 process for exploration noise.
119
120 """
121
122 def __init__(
123 self,
124 env: gym.Env,
125 lr_actor: float = 0.001,
126 lr_critic: float = 0.002,
127 gamma: float = 0.99,
128 buffer_capacity: int = 100000,
129 tau: float = 0.005,
130 hidden_size: Tuple[int, int] = (400, 300),
131 batch_size: int = 64,
132 noise_stddev: float = 0.1,
133 ):
134 self.num_states = env.observation_space.shape[0]
135 self.num_actions = env.action_space.shape[0]
136 self.max_action = env.action_space.high.item()
137 self.min_action = env.action_space.low.item()
138 self.gamma = gamma
139 self.tau = tau
140 self.memory = Buffer(
141 self.num_actions, self.num_states, buffer_capacity, batch_size
142 )
143 self.batch_size = batch_size
144
145 self.noise = GaussianActionNoise(
146 mean=np.zeros(self.num_actions),
147 std_deviation=noise_stddev * np.ones(self.num_actions),
148 )
149
150 self.actor = Actor(self.num_actions, self.num_states, hidden_size)
151 self.critic = Critic(self.num_actions + self.num_states, hidden_size)
152 self.target_actor = Actor(self.num_actions, self.num_states, hidden_size)
153 self.target_critic = Critic(self.num_actions + self.num_states, hidden_size)
154
155 actor_params = optim.get_parameters(self.actor)
156 critic_params = optim.get_parameters(self.critic)
157 target_actor_params = optim.get_parameters(self.target_actor)
158 target_critic_params = optim.get_parameters(self.target_critic)
159
160 if DEVICE == "GPU":
161 [x.gpu_() for x in actor_params + critic_params + target_actor_params + target_critic_params]
162
163 self.actor_optimizer = optim.Adam(actor_params, lr_actor)
164 self.critic_optimizer = optim.Adam(critic_params, lr_critic)
165
166 self.update_network_parameters(tau=1.0)
167
168 def update_network_parameters(self, tau: Optional[float] = None) -> None:
169 """Updates the parameters of the target networks via 'soft updates'."""
170 if tau is None:
171 tau = self.tau
172
173 for param, target_param in zip(
174 optim.get_parameters(self.actor), optim.get_parameters(self.target_actor)
175 ):
176 target_param.assign(param * tau + target_param * (1.0 - tau))
177
178 for param, target_param in zip(
179 optim.get_parameters(self.critic), optim.get_parameters(self.target_critic)
180 ):
181 target_param.assign(param * tau + target_param * (1.0 - tau))
182
183 def choose_action(self, state: Tensor, evaluate: bool = False) -> NDArray:
184 mu = self.actor.forward(state, self.max_action)
185
186 if not evaluate:
187 mu = mu.add(self.noise())
188
189 mu = mu.clip(self.min_action, self.max_action)
190
191 return mu.detach().numpy()
192
193 def learn(self) -> None:
194 """Performs a learning step by sampling from replay buffer and updating networks."""
195 if self.memory.buffer_counter < self.batch_size:
196 return
197
198 (
199 state_batch,
200 action_batch,
201 reward_batch,
202 next_state_batch,
203 done_batch,
204 ) = self.memory.sample()
205
206 target_actions = self.target_actor.forward(next_state_batch, self.max_action)
207 y = reward_batch + self.gamma * self.target_critic.forward(
208 next_state_batch, target_actions.detach()
209 ) * (Tensor.ones(*done_batch.shape, device=DEVICE, requires_grad=False) - done_batch)
210
211 self.critic_optimizer.zero_grad()
212 critic_value = self.critic.forward(state_batch, action_batch)
213 critic_loss = y.detach().sub(critic_value).pow(2).mean()
214 critic_loss.backward()
215 self.critic_optimizer.step()
216
217 self.actor_optimizer.zero_grad()
218 actions = self.actor.forward(state_batch, self.max_action)
219 critic_value = self.critic.forward(state_batch, actions)
220 actor_loss = -critic_value.mean()
221 actor_loss.backward()
222 self.actor_optimizer.step()
223
224 self.update_network_parameters()
225
226
227 if __name__ == "__main__":
228 env = gym.make("Pendulum-v1")
229 agent = DeepDeterministicPolicyGradient(env)
230 num_episodes = 150
231
232 for episode in range(1, num_episodes+1):
233 cumulative_reward = 0.0
234 prev_state, info = env.reset() # for older gym versions only state is returned, so remove info
235 done = False
236
237 while not done:
238 prev_state = Tensor(prev_state, device=DEVICE, requires_grad=False)
239 action = agent.choose_action(prev_state)
240
241 state, reward, done, _, info = env.step(action) # for older gym versions there is only one bool, so remove _
242
243 cumulative_reward += reward
244
245 agent.memory.record((prev_state, action, reward, state, done))
246 agent.learn()
247
248 if done:
249 break
250
251 prev_state = state
252
253 print(
254 f"Episode {episode}/{num_episodes} - cumulative reward: {cumulative_reward}"
255 )
256
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/deep_deterministic_policy_gradient.py b/examples/deep_deterministic_policy_gradient.py
--- a/examples/deep_deterministic_policy_gradient.py
+++ b/examples/deep_deterministic_policy_gradient.py
@@ -173,12 +173,12 @@
for param, target_param in zip(
optim.get_parameters(self.actor), optim.get_parameters(self.target_actor)
):
- target_param.assign(param * tau + target_param * (1.0 - tau))
+ target_param.assign(param.detach() * tau + target_param * (1.0 - tau))
for param, target_param in zip(
optim.get_parameters(self.critic), optim.get_parameters(self.target_critic)
):
- target_param.assign(param * tau + target_param * (1.0 - tau))
+ target_param.assign(param.detach() * tau + target_param * (1.0 - tau))
def choose_action(self, state: Tensor, evaluate: bool = False) -> NDArray:
mu = self.actor.forward(state, self.max_action)
| {"golden_diff": "diff --git a/examples/deep_deterministic_policy_gradient.py b/examples/deep_deterministic_policy_gradient.py\n--- a/examples/deep_deterministic_policy_gradient.py\n+++ b/examples/deep_deterministic_policy_gradient.py\n@@ -173,12 +173,12 @@\n for param, target_param in zip(\n optim.get_parameters(self.actor), optim.get_parameters(self.target_actor)\n ):\n- target_param.assign(param * tau + target_param * (1.0 - tau))\n+ target_param.assign(param.detach() * tau + target_param * (1.0 - tau))\n \n for param, target_param in zip(\n optim.get_parameters(self.critic), optim.get_parameters(self.target_critic)\n ):\n- target_param.assign(param * tau + target_param * (1.0 - tau))\n+ target_param.assign(param.detach() * tau + target_param * (1.0 - tau))\n \n def choose_action(self, state: Tensor, evaluate: bool = False) -> NDArray:\n mu = self.actor.forward(state, self.max_action)\n", "issue": "Assertion error running deep_deterministic_policy_gradient.py example\n- Python 3.9.2\r\n- osx M1\r\n\r\n` python3 deep_deterministic_policy_gradient.py`\r\n\r\n```\r\ncuda backend not available No module named 'pycuda'\r\ngpu backend not available No module named 'pyopencl'\r\nmetal backend not available No module named 'Metal'\r\nTraceback (most recent call last):\r\n File \"tinygrad/examples/deep_deterministic_policy_gradient.py\", line 229, in <module>\r\n agent = DeepDeterministicPolicyGradient(env)\r\n File \"tinygrad/examples/deep_deterministic_policy_gradient.py\", line 166, in __init__\r\n self.update_network_parameters(tau=1.0)\r\n File \"tinygrad/examples/deep_deterministic_policy_gradient.py\", line 176, in update_network_parameters\r\n target_param.assign(param * tau + target_param * (1.0 - tau))\r\n File \"tinygrad/tinygrad/tensor.py\", line 83, in assign\r\n assert not x.requires_grad # self requires_grad is okay?\r\nAssertionError\r\n```\n", "before_files": [{"content": "from typing import Optional, Tuple\nfrom numpy.typing import NDArray\n\nfrom tinygrad.tensor import Tensor\nfrom tinygrad.nn import optim\nfrom tinygrad.helpers import getenv\n\nimport numpy as np\nimport gym\n\n\nDEVICE = \"GPU\" if getenv(\"GPU\") else \"CPU\"\n\n\nclass Actor:\n def __init__(self, num_actions: int, num_states: int, hidden_size: Tuple[int, int] = (400, 300)):\n self.l1 = Tensor.glorot_uniform(num_states, hidden_size[0])\n self.l2 = Tensor.glorot_uniform(hidden_size[0], hidden_size[1])\n self.mu = Tensor.glorot_uniform(hidden_size[1], num_actions)\n\n def forward(self, state: Tensor, upper_bound: float) -> Tensor:\n out = state.dot(self.l1).relu()\n out = out.dot(self.l2).relu()\n out = out.dot(self.mu).tanh()\n output = out * upper_bound\n\n return output\n\n\nclass Critic:\n def __init__(self, num_inputs: int, hidden_size: Tuple[int, int] = (400, 300)):\n self.l1 = Tensor.glorot_uniform(num_inputs, hidden_size[0])\n self.l2 = Tensor.glorot_uniform(hidden_size[0], hidden_size[1])\n self.q = Tensor.glorot_uniform(hidden_size[1], 1)\n\n def forward(self, state: Tensor, action: Tensor) -> Tensor:\n inputs = state.cat(action, dim=1)\n out = inputs.dot(self.l1).relu()\n out = out.dot(self.l2).relu()\n q = out.dot(self.q)\n\n return q\n\n\nclass Buffer:\n def __init__(self, num_actions: int, num_states: int, buffer_capacity: int = 100000, batch_size: int = 64):\n self.buffer_capacity = buffer_capacity\n self.batch_size = batch_size\n\n self.buffer_counter = 0\n\n self.state_buffer = np.zeros((self.buffer_capacity, num_states))\n self.action_buffer = np.zeros((self.buffer_capacity, num_actions))\n 
self.reward_buffer = np.zeros((self.buffer_capacity, 1))\n self.next_state_buffer = np.zeros((self.buffer_capacity, num_states))\n self.done_buffer = np.zeros((self.buffer_capacity, 1))\n\n def record(\n self, observations: Tuple[Tensor, NDArray, float, NDArray, bool]\n ) -> None:\n index = self.buffer_counter % self.buffer_capacity\n\n self.state_buffer[index] = observations[0].detach().numpy()\n self.action_buffer[index] = observations[1]\n self.reward_buffer[index] = observations[2]\n self.next_state_buffer[index] = observations[3]\n self.done_buffer[index] = observations[4]\n\n self.buffer_counter += 1\n\n def sample(self) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:\n record_range = min(self.buffer_counter, self.buffer_capacity)\n batch_indices = np.random.choice(record_range, self.batch_size)\n\n state_batch = Tensor(self.state_buffer[batch_indices], device=DEVICE, requires_grad=False)\n action_batch = Tensor(self.action_buffer[batch_indices], device=DEVICE, requires_grad=False)\n reward_batch = Tensor(self.reward_buffer[batch_indices], device=DEVICE, requires_grad=False)\n next_state_batch = Tensor(self.next_state_buffer[batch_indices], device=DEVICE, requires_grad=False)\n done_batch = Tensor(self.done_buffer[batch_indices], device=DEVICE, requires_grad=False)\n\n return state_batch, action_batch, reward_batch, next_state_batch, done_batch\n\n\nclass GaussianActionNoise:\n def __init__(self, mean: NDArray, std_deviation: NDArray):\n self.mean = mean\n self.std_dev = std_deviation\n\n def __call__(self) -> Tensor:\n return Tensor(\n np.random.default_rng()\n .normal(self.mean, self.std_dev, size=self.mean.shape)\n .astype(np.float32),\n device=DEVICE,\n requires_grad=False,\n )\n\n\nclass DeepDeterministicPolicyGradient:\n \"\"\"Deep Deterministic Policy Gradient (DDPG).\n\n https://arxiv.org/pdf/1509.02971.pdf\n\n Args:\n env: The environment to learn from.\n lr_actor: The learning rate of the actor.\n lr_critic: The learning rate of the critic.\n gamma: The discount factor.\n buffer_capacity: The size of the replay buffer.\n tau: The soft update coefficient.\n hidden_size: The number of neurons in the hidden layers of the actor and critic networks.\n batch_size: The minibatch size for each gradient update.\n noise_stddev: The standard deviation of the exploration noise.\n\n Note:\n In contrast to the original paper, actions are already included in the first layer \n of the Critic and we use a Gaussian distribution instead of an Ornstein Uhlenbeck \n process for exploration noise.\n\n \"\"\"\n\n def __init__(\n self,\n env: gym.Env,\n lr_actor: float = 0.001,\n lr_critic: float = 0.002,\n gamma: float = 0.99,\n buffer_capacity: int = 100000,\n tau: float = 0.005,\n hidden_size: Tuple[int, int] = (400, 300),\n batch_size: int = 64,\n noise_stddev: float = 0.1,\n ):\n self.num_states = env.observation_space.shape[0]\n self.num_actions = env.action_space.shape[0]\n self.max_action = env.action_space.high.item()\n self.min_action = env.action_space.low.item()\n self.gamma = gamma\n self.tau = tau\n self.memory = Buffer(\n self.num_actions, self.num_states, buffer_capacity, batch_size\n )\n self.batch_size = batch_size\n\n self.noise = GaussianActionNoise(\n mean=np.zeros(self.num_actions),\n std_deviation=noise_stddev * np.ones(self.num_actions),\n )\n\n self.actor = Actor(self.num_actions, self.num_states, hidden_size)\n self.critic = Critic(self.num_actions + self.num_states, hidden_size)\n self.target_actor = Actor(self.num_actions, self.num_states, hidden_size)\n 
self.target_critic = Critic(self.num_actions + self.num_states, hidden_size)\n\n actor_params = optim.get_parameters(self.actor)\n critic_params = optim.get_parameters(self.critic)\n target_actor_params = optim.get_parameters(self.target_actor)\n target_critic_params = optim.get_parameters(self.target_critic)\n\n if DEVICE == \"GPU\":\n [x.gpu_() for x in actor_params + critic_params + target_actor_params + target_critic_params]\n\n self.actor_optimizer = optim.Adam(actor_params, lr_actor)\n self.critic_optimizer = optim.Adam(critic_params, lr_critic)\n\n self.update_network_parameters(tau=1.0)\n\n def update_network_parameters(self, tau: Optional[float] = None) -> None:\n \"\"\"Updates the parameters of the target networks via 'soft updates'.\"\"\"\n if tau is None:\n tau = self.tau\n\n for param, target_param in zip(\n optim.get_parameters(self.actor), optim.get_parameters(self.target_actor)\n ):\n target_param.assign(param * tau + target_param * (1.0 - tau))\n\n for param, target_param in zip(\n optim.get_parameters(self.critic), optim.get_parameters(self.target_critic)\n ):\n target_param.assign(param * tau + target_param * (1.0 - tau))\n\n def choose_action(self, state: Tensor, evaluate: bool = False) -> NDArray:\n mu = self.actor.forward(state, self.max_action)\n\n if not evaluate:\n mu = mu.add(self.noise())\n\n mu = mu.clip(self.min_action, self.max_action)\n\n return mu.detach().numpy()\n\n def learn(self) -> None:\n \"\"\"Performs a learning step by sampling from replay buffer and updating networks.\"\"\"\n if self.memory.buffer_counter < self.batch_size:\n return\n\n (\n state_batch,\n action_batch,\n reward_batch,\n next_state_batch,\n done_batch,\n ) = self.memory.sample()\n \n target_actions = self.target_actor.forward(next_state_batch, self.max_action)\n y = reward_batch + self.gamma * self.target_critic.forward(\n next_state_batch, target_actions.detach()\n ) * (Tensor.ones(*done_batch.shape, device=DEVICE, requires_grad=False) - done_batch)\n\n self.critic_optimizer.zero_grad()\n critic_value = self.critic.forward(state_batch, action_batch)\n critic_loss = y.detach().sub(critic_value).pow(2).mean()\n critic_loss.backward()\n self.critic_optimizer.step()\n\n self.actor_optimizer.zero_grad()\n actions = self.actor.forward(state_batch, self.max_action)\n critic_value = self.critic.forward(state_batch, actions)\n actor_loss = -critic_value.mean()\n actor_loss.backward()\n self.actor_optimizer.step()\n\n self.update_network_parameters()\n\n\nif __name__ == \"__main__\":\n env = gym.make(\"Pendulum-v1\")\n agent = DeepDeterministicPolicyGradient(env)\n num_episodes = 150\n\n for episode in range(1, num_episodes+1):\n cumulative_reward = 0.0\n prev_state, info = env.reset() # for older gym versions only state is returned, so remove info\n done = False\n\n while not done:\n prev_state = Tensor(prev_state, device=DEVICE, requires_grad=False)\n action = agent.choose_action(prev_state)\n\n state, reward, done, _, info = env.step(action) # for older gym versions there is only one bool, so remove _\n\n cumulative_reward += reward\n\n agent.memory.record((prev_state, action, reward, state, done))\n agent.learn()\n\n if done:\n break\n\n prev_state = state\n\n print(\n f\"Episode {episode}/{num_episodes} - cumulative reward: {cumulative_reward}\"\n )\n", "path": "examples/deep_deterministic_policy_gradient.py"}], "after_files": [{"content": "from typing import Optional, Tuple\nfrom numpy.typing import NDArray\n\nfrom tinygrad.tensor import Tensor\nfrom tinygrad.nn import optim\nfrom 
tinygrad.helpers import getenv\n\nimport numpy as np\nimport gym\n\n\nDEVICE = \"GPU\" if getenv(\"GPU\") else \"CPU\"\n\n\nclass Actor:\n def __init__(self, num_actions: int, num_states: int, hidden_size: Tuple[int, int] = (400, 300)):\n self.l1 = Tensor.glorot_uniform(num_states, hidden_size[0])\n self.l2 = Tensor.glorot_uniform(hidden_size[0], hidden_size[1])\n self.mu = Tensor.glorot_uniform(hidden_size[1], num_actions)\n\n def forward(self, state: Tensor, upper_bound: float) -> Tensor:\n out = state.dot(self.l1).relu()\n out = out.dot(self.l2).relu()\n out = out.dot(self.mu).tanh()\n output = out * upper_bound\n\n return output\n\n\nclass Critic:\n def __init__(self, num_inputs: int, hidden_size: Tuple[int, int] = (400, 300)):\n self.l1 = Tensor.glorot_uniform(num_inputs, hidden_size[0])\n self.l2 = Tensor.glorot_uniform(hidden_size[0], hidden_size[1])\n self.q = Tensor.glorot_uniform(hidden_size[1], 1)\n\n def forward(self, state: Tensor, action: Tensor) -> Tensor:\n inputs = state.cat(action, dim=1)\n out = inputs.dot(self.l1).relu()\n out = out.dot(self.l2).relu()\n q = out.dot(self.q)\n\n return q\n\n\nclass Buffer:\n def __init__(self, num_actions: int, num_states: int, buffer_capacity: int = 100000, batch_size: int = 64):\n self.buffer_capacity = buffer_capacity\n self.batch_size = batch_size\n\n self.buffer_counter = 0\n\n self.state_buffer = np.zeros((self.buffer_capacity, num_states))\n self.action_buffer = np.zeros((self.buffer_capacity, num_actions))\n self.reward_buffer = np.zeros((self.buffer_capacity, 1))\n self.next_state_buffer = np.zeros((self.buffer_capacity, num_states))\n self.done_buffer = np.zeros((self.buffer_capacity, 1))\n\n def record(\n self, observations: Tuple[Tensor, NDArray, float, NDArray, bool]\n ) -> None:\n index = self.buffer_counter % self.buffer_capacity\n\n self.state_buffer[index] = observations[0].detach().numpy()\n self.action_buffer[index] = observations[1]\n self.reward_buffer[index] = observations[2]\n self.next_state_buffer[index] = observations[3]\n self.done_buffer[index] = observations[4]\n\n self.buffer_counter += 1\n\n def sample(self) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:\n record_range = min(self.buffer_counter, self.buffer_capacity)\n batch_indices = np.random.choice(record_range, self.batch_size)\n\n state_batch = Tensor(self.state_buffer[batch_indices], device=DEVICE, requires_grad=False)\n action_batch = Tensor(self.action_buffer[batch_indices], device=DEVICE, requires_grad=False)\n reward_batch = Tensor(self.reward_buffer[batch_indices], device=DEVICE, requires_grad=False)\n next_state_batch = Tensor(self.next_state_buffer[batch_indices], device=DEVICE, requires_grad=False)\n done_batch = Tensor(self.done_buffer[batch_indices], device=DEVICE, requires_grad=False)\n\n return state_batch, action_batch, reward_batch, next_state_batch, done_batch\n\n\nclass GaussianActionNoise:\n def __init__(self, mean: NDArray, std_deviation: NDArray):\n self.mean = mean\n self.std_dev = std_deviation\n\n def __call__(self) -> Tensor:\n return Tensor(\n np.random.default_rng()\n .normal(self.mean, self.std_dev, size=self.mean.shape)\n .astype(np.float32),\n device=DEVICE,\n requires_grad=False,\n )\n\n\nclass DeepDeterministicPolicyGradient:\n \"\"\"Deep Deterministic Policy Gradient (DDPG).\n\n https://arxiv.org/pdf/1509.02971.pdf\n\n Args:\n env: The environment to learn from.\n lr_actor: The learning rate of the actor.\n lr_critic: The learning rate of the critic.\n gamma: The discount factor.\n buffer_capacity: The size of the 
replay buffer.\n tau: The soft update coefficient.\n hidden_size: The number of neurons in the hidden layers of the actor and critic networks.\n batch_size: The minibatch size for each gradient update.\n noise_stddev: The standard deviation of the exploration noise.\n\n Note:\n In contrast to the original paper, actions are already included in the first layer \n of the Critic and we use a Gaussian distribution instead of an Ornstein Uhlenbeck \n process for exploration noise.\n\n \"\"\"\n\n def __init__(\n self,\n env: gym.Env,\n lr_actor: float = 0.001,\n lr_critic: float = 0.002,\n gamma: float = 0.99,\n buffer_capacity: int = 100000,\n tau: float = 0.005,\n hidden_size: Tuple[int, int] = (400, 300),\n batch_size: int = 64,\n noise_stddev: float = 0.1,\n ):\n self.num_states = env.observation_space.shape[0]\n self.num_actions = env.action_space.shape[0]\n self.max_action = env.action_space.high.item()\n self.min_action = env.action_space.low.item()\n self.gamma = gamma\n self.tau = tau\n self.memory = Buffer(\n self.num_actions, self.num_states, buffer_capacity, batch_size\n )\n self.batch_size = batch_size\n\n self.noise = GaussianActionNoise(\n mean=np.zeros(self.num_actions),\n std_deviation=noise_stddev * np.ones(self.num_actions),\n )\n\n self.actor = Actor(self.num_actions, self.num_states, hidden_size)\n self.critic = Critic(self.num_actions + self.num_states, hidden_size)\n self.target_actor = Actor(self.num_actions, self.num_states, hidden_size)\n self.target_critic = Critic(self.num_actions + self.num_states, hidden_size)\n\n actor_params = optim.get_parameters(self.actor)\n critic_params = optim.get_parameters(self.critic)\n target_actor_params = optim.get_parameters(self.target_actor)\n target_critic_params = optim.get_parameters(self.target_critic)\n\n if DEVICE == \"GPU\":\n [x.gpu_() for x in actor_params + critic_params + target_actor_params + target_critic_params]\n\n self.actor_optimizer = optim.Adam(actor_params, lr_actor)\n self.critic_optimizer = optim.Adam(critic_params, lr_critic)\n\n self.update_network_parameters(tau=1.0)\n\n def update_network_parameters(self, tau: Optional[float] = None) -> None:\n \"\"\"Updates the parameters of the target networks via 'soft updates'.\"\"\"\n if tau is None:\n tau = self.tau\n\n for param, target_param in zip(\n optim.get_parameters(self.actor), optim.get_parameters(self.target_actor)\n ):\n target_param.assign(param.detach() * tau + target_param * (1.0 - tau))\n\n for param, target_param in zip(\n optim.get_parameters(self.critic), optim.get_parameters(self.target_critic)\n ):\n target_param.assign(param.detach() * tau + target_param * (1.0 - tau))\n\n def choose_action(self, state: Tensor, evaluate: bool = False) -> NDArray:\n mu = self.actor.forward(state, self.max_action)\n\n if not evaluate:\n mu = mu.add(self.noise())\n\n mu = mu.clip(self.min_action, self.max_action)\n\n return mu.detach().numpy()\n\n def learn(self) -> None:\n \"\"\"Performs a learning step by sampling from replay buffer and updating networks.\"\"\"\n if self.memory.buffer_counter < self.batch_size:\n return\n\n (\n state_batch,\n action_batch,\n reward_batch,\n next_state_batch,\n done_batch,\n ) = self.memory.sample()\n \n target_actions = self.target_actor.forward(next_state_batch, self.max_action)\n y = reward_batch + self.gamma * self.target_critic.forward(\n next_state_batch, target_actions.detach()\n ) * (Tensor.ones(*done_batch.shape, device=DEVICE, requires_grad=False) - done_batch)\n\n self.critic_optimizer.zero_grad()\n critic_value = 
self.critic.forward(state_batch, action_batch)\n critic_loss = y.detach().sub(critic_value).pow(2).mean()\n critic_loss.backward()\n self.critic_optimizer.step()\n\n self.actor_optimizer.zero_grad()\n actions = self.actor.forward(state_batch, self.max_action)\n critic_value = self.critic.forward(state_batch, actions)\n actor_loss = -critic_value.mean()\n actor_loss.backward()\n self.actor_optimizer.step()\n\n self.update_network_parameters()\n\n\nif __name__ == \"__main__\":\n env = gym.make(\"Pendulum-v1\")\n agent = DeepDeterministicPolicyGradient(env)\n num_episodes = 150\n\n for episode in range(1, num_episodes+1):\n cumulative_reward = 0.0\n prev_state, info = env.reset() # for older gym versions only state is returned, so remove info\n done = False\n\n while not done:\n prev_state = Tensor(prev_state, device=DEVICE, requires_grad=False)\n action = agent.choose_action(prev_state)\n\n state, reward, done, _, info = env.step(action) # for older gym versions there is only one bool, so remove _\n\n cumulative_reward += reward\n\n agent.memory.record((prev_state, action, reward, state, done))\n agent.learn()\n\n if done:\n break\n\n prev_state = state\n\n print(\n f\"Episode {episode}/{num_episodes} - cumulative reward: {cumulative_reward}\"\n )\n", "path": "examples/deep_deterministic_policy_gradient.py"}]} | 3,380 | 230 |
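The tinygrad patch above works because `Tensor.assign` asserts that the incoming tensor does not require gradients (the exact assertion shown in the issue traceback), so the soft update has to `detach()` the online network's parameters before mixing them into the targets. The snippet below is a framework-agnostic NumPy sketch of the same Polyak/soft-update rule; `soft_update` and the toy weights are illustrative, not tinygrad API.

```python
import numpy as np


def soft_update(params, target_params, tau: float = 0.005) -> None:
    """In-place Polyak averaging: target <- tau * online + (1 - tau) * target.

    The tinygrad example writes the same rule as
    target_param.assign(param.detach() * tau + target_param * (1.0 - tau)),
    where detach() keeps the assigned value out of the autograd graph.
    """
    for p, tp in zip(params, target_params):
        tp[...] = tau * p + (1.0 - tau) * tp


# Tiny usage example with random "weights".
online = [np.random.randn(4, 4), np.random.randn(4)]
target = [np.zeros_like(w) for w in online]
soft_update(online, target, tau=1.0)  # tau=1.0 copies the online weights exactly
assert all(np.allclose(p, t) for p, t in zip(online, target))
```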
gh_patches_debug_17339 | rasdani/github-patches | git_diff | Pycord-Development__pycord-1094 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Optional Modal InputText fields with existing values do not accept empty strings
### Summary
Submitted Empty InputText fields revert to stored value
### Reproduction Steps
1. Create a Modal with an `InputText` that is both `required=False` and has a `value`
2. Delete the text in the Modal and submit
3. Inspect the Modal's children in the callback.
### Minimal Reproducible Code
_No response_
### Expected Results
Expected: The incoming field (e.g. `self.children[0].value`) should be empty
### Actual Results
Actual: The field retains its previous/existing `value` instead of being overwritten by the empty string.
### Intents
members
### System Information
Python 3.9
Pycord 2.0.0b4
### Checklist
- [X] I have searched the open issues for duplicates.
- [X] I have shown the entire traceback, if possible.
- [X] I have removed my token from display, if visible.
### Additional Context
I did verify through logging that the Modal does indeed submit the empty value, but I was unable to find where in the code this was handled.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `discord/ui/input_text.py`
Content:
```
1 from __future__ import annotations
2
3 import os
4 from typing import TYPE_CHECKING, Optional
5
6 from ..components import InputText as InputTextComponent
7 from ..enums import ComponentType, InputTextStyle
8 from ..utils import MISSING
9
10 __all__ = ("InputText",)
11
12 if TYPE_CHECKING:
13 from ..types.components import InputText as InputTextComponentPayload
14
15
16 class InputText:
17 """Represents a UI text input field.
18
19 Parameters
20 ----------
21 style: :class:`discord.InputTextStyle`
22 The style of the input text field.
23 custom_id: Optional[:class:`str`]
24 The ID of the input text field that gets received during an interaction.
25 label: :class:`str`
26 The label for the input text field.
27 Must be 45 characters or fewer.
28 placeholder: Optional[:class:`str`]
29 The placeholder text that is shown if nothing is selected, if any.
30 Must be 100 characters or fewer.
31 min_length: Optional[:class:`int`]
32 The minimum number of characters that must be entered.
33 Defaults to 0 and must be less than 4000.
34 max_length: Optional[:class:`int`]
35 The maximum number of characters that can be entered.
36 Must be between 1 and 4000.
37 required: Optional[:class:`bool`]
38 Whether the input text field is required or not. Defaults to `True`.
39 value: Optional[:class:`str`]
40 Pre-fills the input text field with this value.
41 Must be 4000 characters or fewer.
42 row: Optional[:class:`int`]
43 The relative row this input text field belongs to. A modal dialog can only have 5
44 rows. By default, items are arranged automatically into those 5 rows. If you'd
45 like to control the relative positioning of the row then passing an index is advised.
46 For example, row=1 will show up before row=2. Defaults to ``None``, which is automatic
47 ordering. The row number must be between 0 and 4 (i.e. zero indexed).
48 """
49
50 def __init__(
51 self,
52 *,
53 style: InputTextStyle = InputTextStyle.short,
54 custom_id: str = MISSING,
55 label: str,
56 placeholder: Optional[str] = None,
57 min_length: Optional[int] = None,
58 max_length: Optional[int] = None,
59 required: Optional[bool] = True,
60 value: Optional[str] = None,
61 row: Optional[int] = None,
62 ):
63 super().__init__()
64 custom_id = os.urandom(16).hex() if custom_id is MISSING else custom_id
65 if not (isinstance(custom_id, str) or custom_id is None):
66 raise TypeError(f"expected custom_id to be str, not {custom_id.__class__.__name__}")
67
68 self._underlying = InputTextComponent._raw_construct(
69 type=ComponentType.input_text,
70 style=style,
71 custom_id=custom_id,
72 label=label,
73 placeholder=placeholder,
74 min_length=min_length,
75 max_length=max_length,
76 required=required,
77 value=value,
78 )
79 self._input_value = None
80 self.row = row
81 self._rendered_row: Optional[int] = None
82
83 @property
84 def type(self) -> ComponentType:
85 return self._underlying.type
86
87 @property
88 def style(self) -> InputTextStyle:
89 """:class:`discord.InputTextStyle`: The style of the input text field."""
90 return self._underlying.style
91
92 @style.setter
93 def style(self, value: InputTextStyle):
94 if not isinstance(value, InputTextStyle):
95 raise TypeError(f"style must be of type InputTextStyle not {value.__class__}")
96 self._underlying.style = value
97
98 @property
99 def custom_id(self) -> str:
100 """:class:`str`: The ID of the input text field that gets received during an interaction."""
101 return self._underlying.custom_id
102
103 @custom_id.setter
104 def custom_id(self, value: str):
105 if not isinstance(value, str):
106 raise TypeError(f"custom_id must be None or str not {value.__class__}")
107 self._underlying.custom_id = value
108
109 @property
110 def label(self) -> str:
111 """:class:`str`: The label of the input text field."""
112 return self._underlying.label
113
114 @label.setter
115 def label(self, value: str):
116 if not isinstance(value, str):
117 raise TypeError(f"label should be str not {value.__class__}")
118 self._underlying.label = value
119
120 @property
121 def placeholder(self) -> Optional[str]:
122 """Optional[:class:`str`]: The placeholder text that is shown before anything is entered, if any."""
123 return self._underlying.placeholder
124
125 @placeholder.setter
126 def placeholder(self, value: Optional[str]):
127 if value and not isinstance(value, str):
128 raise TypeError(f"placeholder must be None or str not {value.__class__}") # type: ignore
129 self._underlying.placeholder = value
130
131 @property
132 def min_length(self) -> Optional[int]:
133 """Optional[:class:`int`]: The minimum number of characters that must be entered. Defaults to `0`."""
134 return self._underlying.min_length
135
136 @min_length.setter
137 def min_length(self, value: Optional[int]):
138 if value and not isinstance(value, int):
139 raise TypeError(f"min_length must be None or int not {value.__class__}") # type: ignore
140 self._underlying.min_length = value
141
142 @property
143 def max_length(self) -> Optional[int]:
144 """Optional[:class:`int`]: The maximum number of characters that can be entered."""
145 return self._underlying.max_length
146
147 @max_length.setter
148 def max_length(self, value: Optional[int]):
149 if value and not isinstance(value, int):
150 raise TypeError(f"min_length must be None or int not {value.__class__}") # type: ignore
151 self._underlying.max_length = value
152
153 @property
154 def required(self) -> Optional[bool]:
155 """Optional[:class:`bool`]: Whether the input text field is required or not. Defaults to `True`."""
156 return self._underlying.required
157
158 @required.setter
159 def required(self, value: Optional[bool]):
160 if not isinstance(value, bool):
161 raise TypeError(f"required must be bool not {value.__class__}") # type: ignore
162 self._underlying.required = bool(value)
163
164 @property
165 def value(self) -> Optional[str]:
166 """Optional[:class:`str`]: The value entered in the text field."""
167 return self._input_value or self._underlying.value
168
169 @value.setter
170 def value(self, value: Optional[str]):
171 if value and not isinstance(value, str):
172 raise TypeError(f"value must be None or str not {value.__class__}") # type: ignore
173 self._underlying.value = value
174
175 @property
176 def width(self) -> int:
177 return 5
178
179 def to_component_dict(self) -> InputTextComponentPayload:
180 return self._underlying.to_dict()
181
182 def refresh_state(self, data) -> None:
183 self._input_value = data["value"]
184
```
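The report boils down to the `value` property at line 167 of the file above: `self._input_value or self._underlying.value` falls back to the pre-filled value whenever the submitted string is falsy, and an empty string is falsy. A self-contained sketch of that pitfall and of a sentinel-based fix follows; these classes are illustrative, not part of Pycord, and the patch shown further down uses `False` as the marker instead of a dedicated sentinel object, which works the same way for a field that only ever stores `str` or `None`:

```python
class BuggyField:
    def __init__(self, prefill):
        self._prefill = prefill
        self._input_value = None          # same initial state as the library code

    @property
    def value(self):
        # '"" or prefill' evaluates to prefill because "" is falsy.
        return self._input_value or self._prefill


class FixedField(BuggyField):
    _UNSET = object()                     # sentinel: "no submission seen yet"

    def __init__(self, prefill):
        super().__init__(prefill)
        self._input_value = self._UNSET

    @property
    def value(self):
        if self._input_value is not self._UNSET:
            return self._input_value      # may legitimately be ""
        return self._prefill


buggy, fixed = BuggyField("old text"), FixedField("old text")
buggy._input_value = fixed._input_value = ""   # the field was cleared and submitted
print(buggy.value)   # 'old text' -> the behaviour reported in the issue
print(fixed.value)   # ''         -> the behaviour the issue expects
```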
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/discord/ui/input_text.py b/discord/ui/input_text.py
--- a/discord/ui/input_text.py
+++ b/discord/ui/input_text.py
@@ -76,7 +76,7 @@
required=required,
value=value,
)
- self._input_value = None
+ self._input_value = False
self.row = row
self._rendered_row: Optional[int] = None
@@ -164,7 +164,10 @@
@property
def value(self) -> Optional[str]:
"""Optional[:class:`str`]: The value entered in the text field."""
- return self._input_value or self._underlying.value
+ if self._input_value is not False:
+ # only False on init, otherwise the value was either set or cleared
+ return self._input_value # type: ignore
+ return self._underlying.value
@value.setter
def value(self, value: Optional[str]):
| {"golden_diff": "diff --git a/discord/ui/input_text.py b/discord/ui/input_text.py\n--- a/discord/ui/input_text.py\n+++ b/discord/ui/input_text.py\n@@ -76,7 +76,7 @@\n required=required,\n value=value,\n )\n- self._input_value = None\n+ self._input_value = False\n self.row = row\n self._rendered_row: Optional[int] = None\n \n@@ -164,7 +164,10 @@\n @property\n def value(self) -> Optional[str]:\n \"\"\"Optional[:class:`str`]: The value entered in the text field.\"\"\"\n- return self._input_value or self._underlying.value\n+ if self._input_value is not False:\n+ # only False on init, otherwise the value was either set or cleared\n+ return self._input_value # type: ignore\n+ return self._underlying.value\n \n @value.setter\n def value(self, value: Optional[str]):\n", "issue": "Optional Modal InputText fields with existing values do not accept empty strings\n### Summary\n\nSubmitted Empty InputText fields revert to stored value\n\n### Reproduction Steps\n\n1. Create a Modal with an `InputText` that is both `required=False` and has a `value`\r\n2. Delete the text in the Modal and submit\r\n3. Inspect the Modal's children in the callback.\n\n### Minimal Reproducible Code\n\n_No response_\n\n### Expected Results\n\nExpected: The incoming field (e.g. `self.children[0].value`) should be empty\n\n### Actual Results\n\nActual: The field retains its previous/existing `value` instead of being overwritten by the empty string.\n\n### Intents\n\nmembers\n\n### System Information\n\nPython 3.9\r\nPycord 2.0.0b4\n\n### Checklist\n\n- [X] I have searched the open issues for duplicates.\n- [X] I have shown the entire traceback, if possible.\n- [X] I have removed my token from display, if visible.\n\n### Additional Context\n\nI did verify through logging that the Modal does indeed submit the empty value, but I was unable to find where in the code this was handled.\nOptional Modal InputText fields with existing values do not accept empty strings\n### Summary\n\nSubmitted Empty InputText fields revert to stored value\n\n### Reproduction Steps\n\n1. Create a Modal with an `InputText` that is both `required=False` and has a `value`\r\n2. Delete the text in the Modal and submit\r\n3. Inspect the Modal's children in the callback.\n\n### Minimal Reproducible Code\n\n_No response_\n\n### Expected Results\n\nExpected: The incoming field (e.g. 
`self.children[0].value`) should be empty\n\n### Actual Results\n\nActual: The field retains its previous/existing `value` instead of being overwritten by the empty string.\n\n### Intents\n\nmembers\n\n### System Information\n\nPython 3.9\r\nPycord 2.0.0b4\n\n### Checklist\n\n- [X] I have searched the open issues for duplicates.\n- [X] I have shown the entire traceback, if possible.\n- [X] I have removed my token from display, if visible.\n\n### Additional Context\n\nI did verify through logging that the Modal does indeed submit the empty value, but I was unable to find where in the code this was handled.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport os\nfrom typing import TYPE_CHECKING, Optional\n\nfrom ..components import InputText as InputTextComponent\nfrom ..enums import ComponentType, InputTextStyle\nfrom ..utils import MISSING\n\n__all__ = (\"InputText\",)\n\nif TYPE_CHECKING:\n from ..types.components import InputText as InputTextComponentPayload\n\n\nclass InputText:\n \"\"\"Represents a UI text input field.\n\n Parameters\n ----------\n style: :class:`discord.InputTextStyle`\n The style of the input text field.\n custom_id: Optional[:class:`str`]\n The ID of the input text field that gets received during an interaction.\n label: :class:`str`\n The label for the input text field.\n Must be 45 characters or fewer.\n placeholder: Optional[:class:`str`]\n The placeholder text that is shown if nothing is selected, if any.\n Must be 100 characters or fewer.\n min_length: Optional[:class:`int`]\n The minimum number of characters that must be entered.\n Defaults to 0 and must be less than 4000.\n max_length: Optional[:class:`int`]\n The maximum number of characters that can be entered.\n Must be between 1 and 4000.\n required: Optional[:class:`bool`]\n Whether the input text field is required or not. Defaults to `True`.\n value: Optional[:class:`str`]\n Pre-fills the input text field with this value.\n Must be 4000 characters or fewer.\n row: Optional[:class:`int`]\n The relative row this input text field belongs to. A modal dialog can only have 5\n rows. By default, items are arranged automatically into those 5 rows. If you'd\n like to control the relative positioning of the row then passing an index is advised.\n For example, row=1 will show up before row=2. Defaults to ``None``, which is automatic\n ordering. The row number must be between 0 and 4 (i.e. 
zero indexed).\n \"\"\"\n\n def __init__(\n self,\n *,\n style: InputTextStyle = InputTextStyle.short,\n custom_id: str = MISSING,\n label: str,\n placeholder: Optional[str] = None,\n min_length: Optional[int] = None,\n max_length: Optional[int] = None,\n required: Optional[bool] = True,\n value: Optional[str] = None,\n row: Optional[int] = None,\n ):\n super().__init__()\n custom_id = os.urandom(16).hex() if custom_id is MISSING else custom_id\n if not (isinstance(custom_id, str) or custom_id is None):\n raise TypeError(f\"expected custom_id to be str, not {custom_id.__class__.__name__}\")\n\n self._underlying = InputTextComponent._raw_construct(\n type=ComponentType.input_text,\n style=style,\n custom_id=custom_id,\n label=label,\n placeholder=placeholder,\n min_length=min_length,\n max_length=max_length,\n required=required,\n value=value,\n )\n self._input_value = None\n self.row = row\n self._rendered_row: Optional[int] = None\n\n @property\n def type(self) -> ComponentType:\n return self._underlying.type\n\n @property\n def style(self) -> InputTextStyle:\n \"\"\":class:`discord.InputTextStyle`: The style of the input text field.\"\"\"\n return self._underlying.style\n\n @style.setter\n def style(self, value: InputTextStyle):\n if not isinstance(value, InputTextStyle):\n raise TypeError(f\"style must be of type InputTextStyle not {value.__class__}\")\n self._underlying.style = value\n\n @property\n def custom_id(self) -> str:\n \"\"\":class:`str`: The ID of the input text field that gets received during an interaction.\"\"\"\n return self._underlying.custom_id\n\n @custom_id.setter\n def custom_id(self, value: str):\n if not isinstance(value, str):\n raise TypeError(f\"custom_id must be None or str not {value.__class__}\")\n self._underlying.custom_id = value\n\n @property\n def label(self) -> str:\n \"\"\":class:`str`: The label of the input text field.\"\"\"\n return self._underlying.label\n\n @label.setter\n def label(self, value: str):\n if not isinstance(value, str):\n raise TypeError(f\"label should be str not {value.__class__}\")\n self._underlying.label = value\n\n @property\n def placeholder(self) -> Optional[str]:\n \"\"\"Optional[:class:`str`]: The placeholder text that is shown before anything is entered, if any.\"\"\"\n return self._underlying.placeholder\n\n @placeholder.setter\n def placeholder(self, value: Optional[str]):\n if value and not isinstance(value, str):\n raise TypeError(f\"placeholder must be None or str not {value.__class__}\") # type: ignore\n self._underlying.placeholder = value\n\n @property\n def min_length(self) -> Optional[int]:\n \"\"\"Optional[:class:`int`]: The minimum number of characters that must be entered. 
Defaults to `0`.\"\"\"\n return self._underlying.min_length\n\n @min_length.setter\n def min_length(self, value: Optional[int]):\n if value and not isinstance(value, int):\n raise TypeError(f\"min_length must be None or int not {value.__class__}\") # type: ignore\n self._underlying.min_length = value\n\n @property\n def max_length(self) -> Optional[int]:\n \"\"\"Optional[:class:`int`]: The maximum number of characters that can be entered.\"\"\"\n return self._underlying.max_length\n\n @max_length.setter\n def max_length(self, value: Optional[int]):\n if value and not isinstance(value, int):\n raise TypeError(f\"min_length must be None or int not {value.__class__}\") # type: ignore\n self._underlying.max_length = value\n\n @property\n def required(self) -> Optional[bool]:\n \"\"\"Optional[:class:`bool`]: Whether the input text field is required or not. Defaults to `True`.\"\"\"\n return self._underlying.required\n\n @required.setter\n def required(self, value: Optional[bool]):\n if not isinstance(value, bool):\n raise TypeError(f\"required must be bool not {value.__class__}\") # type: ignore\n self._underlying.required = bool(value)\n\n @property\n def value(self) -> Optional[str]:\n \"\"\"Optional[:class:`str`]: The value entered in the text field.\"\"\"\n return self._input_value or self._underlying.value\n\n @value.setter\n def value(self, value: Optional[str]):\n if value and not isinstance(value, str):\n raise TypeError(f\"value must be None or str not {value.__class__}\") # type: ignore\n self._underlying.value = value\n\n @property\n def width(self) -> int:\n return 5\n\n def to_component_dict(self) -> InputTextComponentPayload:\n return self._underlying.to_dict()\n\n def refresh_state(self, data) -> None:\n self._input_value = data[\"value\"]\n", "path": "discord/ui/input_text.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport os\nfrom typing import TYPE_CHECKING, Optional\n\nfrom ..components import InputText as InputTextComponent\nfrom ..enums import ComponentType, InputTextStyle\nfrom ..utils import MISSING\n\n__all__ = (\"InputText\",)\n\nif TYPE_CHECKING:\n from ..types.components import InputText as InputTextComponentPayload\n\n\nclass InputText:\n \"\"\"Represents a UI text input field.\n\n Parameters\n ----------\n style: :class:`discord.InputTextStyle`\n The style of the input text field.\n custom_id: Optional[:class:`str`]\n The ID of the input text field that gets received during an interaction.\n label: :class:`str`\n The label for the input text field.\n Must be 45 characters or fewer.\n placeholder: Optional[:class:`str`]\n The placeholder text that is shown if nothing is selected, if any.\n Must be 100 characters or fewer.\n min_length: Optional[:class:`int`]\n The minimum number of characters that must be entered.\n Defaults to 0 and must be less than 4000.\n max_length: Optional[:class:`int`]\n The maximum number of characters that can be entered.\n Must be between 1 and 4000.\n required: Optional[:class:`bool`]\n Whether the input text field is required or not. Defaults to `True`.\n value: Optional[:class:`str`]\n Pre-fills the input text field with this value.\n Must be 4000 characters or fewer.\n row: Optional[:class:`int`]\n The relative row this input text field belongs to. A modal dialog can only have 5\n rows. By default, items are arranged automatically into those 5 rows. If you'd\n like to control the relative positioning of the row then passing an index is advised.\n For example, row=1 will show up before row=2. 
Defaults to ``None``, which is automatic\n ordering. The row number must be between 0 and 4 (i.e. zero indexed).\n \"\"\"\n\n def __init__(\n self,\n *,\n style: InputTextStyle = InputTextStyle.short,\n custom_id: str = MISSING,\n label: str,\n placeholder: Optional[str] = None,\n min_length: Optional[int] = None,\n max_length: Optional[int] = None,\n required: Optional[bool] = True,\n value: Optional[str] = None,\n row: Optional[int] = None,\n ):\n super().__init__()\n custom_id = os.urandom(16).hex() if custom_id is MISSING else custom_id\n if not (isinstance(custom_id, str) or custom_id is None):\n raise TypeError(f\"expected custom_id to be str, not {custom_id.__class__.__name__}\")\n\n self._underlying = InputTextComponent._raw_construct(\n type=ComponentType.input_text,\n style=style,\n custom_id=custom_id,\n label=label,\n placeholder=placeholder,\n min_length=min_length,\n max_length=max_length,\n required=required,\n value=value,\n )\n self._input_value = False\n self.row = row\n self._rendered_row: Optional[int] = None\n\n @property\n def type(self) -> ComponentType:\n return self._underlying.type\n\n @property\n def style(self) -> InputTextStyle:\n \"\"\":class:`discord.InputTextStyle`: The style of the input text field.\"\"\"\n return self._underlying.style\n\n @style.setter\n def style(self, value: InputTextStyle):\n if not isinstance(value, InputTextStyle):\n raise TypeError(f\"style must be of type InputTextStyle not {value.__class__}\")\n self._underlying.style = value\n\n @property\n def custom_id(self) -> str:\n \"\"\":class:`str`: The ID of the input text field that gets received during an interaction.\"\"\"\n return self._underlying.custom_id\n\n @custom_id.setter\n def custom_id(self, value: str):\n if not isinstance(value, str):\n raise TypeError(f\"custom_id must be None or str not {value.__class__}\")\n self._underlying.custom_id = value\n\n @property\n def label(self) -> str:\n \"\"\":class:`str`: The label of the input text field.\"\"\"\n return self._underlying.label\n\n @label.setter\n def label(self, value: str):\n if not isinstance(value, str):\n raise TypeError(f\"label should be str not {value.__class__}\")\n self._underlying.label = value\n\n @property\n def placeholder(self) -> Optional[str]:\n \"\"\"Optional[:class:`str`]: The placeholder text that is shown before anything is entered, if any.\"\"\"\n return self._underlying.placeholder\n\n @placeholder.setter\n def placeholder(self, value: Optional[str]):\n if value and not isinstance(value, str):\n raise TypeError(f\"placeholder must be None or str not {value.__class__}\") # type: ignore\n self._underlying.placeholder = value\n\n @property\n def min_length(self) -> Optional[int]:\n \"\"\"Optional[:class:`int`]: The minimum number of characters that must be entered. 
Defaults to `0`.\"\"\"\n return self._underlying.min_length\n\n @min_length.setter\n def min_length(self, value: Optional[int]):\n if value and not isinstance(value, int):\n raise TypeError(f\"min_length must be None or int not {value.__class__}\") # type: ignore\n self._underlying.min_length = value\n\n @property\n def max_length(self) -> Optional[int]:\n \"\"\"Optional[:class:`int`]: The maximum number of characters that can be entered.\"\"\"\n return self._underlying.max_length\n\n @max_length.setter\n def max_length(self, value: Optional[int]):\n if value and not isinstance(value, int):\n raise TypeError(f\"min_length must be None or int not {value.__class__}\") # type: ignore\n self._underlying.max_length = value\n\n @property\n def required(self) -> Optional[bool]:\n \"\"\"Optional[:class:`bool`]: Whether the input text field is required or not. Defaults to `True`.\"\"\"\n return self._underlying.required\n\n @required.setter\n def required(self, value: Optional[bool]):\n if not isinstance(value, bool):\n raise TypeError(f\"required must be bool not {value.__class__}\") # type: ignore\n self._underlying.required = bool(value)\n\n @property\n def value(self) -> Optional[str]:\n \"\"\"Optional[:class:`str`]: The value entered in the text field.\"\"\"\n if self._input_value is not False:\n # only False on init, otherwise the value was either set or cleared\n return self._input_value # type: ignore\n return self._underlying.value\n\n @value.setter\n def value(self, value: Optional[str]):\n if value and not isinstance(value, str):\n raise TypeError(f\"value must be None or str not {value.__class__}\") # type: ignore\n self._underlying.value = value\n\n @property\n def width(self) -> int:\n return 5\n\n def to_component_dict(self) -> InputTextComponentPayload:\n return self._underlying.to_dict()\n\n def refresh_state(self, data) -> None:\n self._input_value = data[\"value\"]\n", "path": "discord/ui/input_text.py"}]} | 2,789 | 220 |
gh_patches_debug_15806 | rasdani/github-patches | git_diff | pwndbg__pwndbg-1409 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix cymbol command on Ubuntu 18.04
There's a cymbol test that fails on the Ubuntu 18.04 CI; this issue is there to track it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/commands/cymbol.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 Add, load, show, edit, or delete symbols for custom structures.
6
7 For the generation of the symbols g++/gcc is being used under the hood.
8
9 In case of remote debugging a binary which is not native to your architecture it
10 is advised to configure the 'gcc-config-path' config parameter to your own cross-platform
11 gnu gcc compiled toolchain for your target architecture.
12
13 You are advised to configure the 'cymbol-editor' config parameter to the path of your
14 favorite text editor. Otherwise cymbol exapnds $EDITOR and $VISUAL environment variables
15 to find the path to the default text editor.
16 """
17
18 import argparse
19 import functools
20 import os
21 import subprocess
22 import sys
23 import tempfile
24
25 import gdb
26
27 import pwndbg
28 import pwndbg.commands
29 import pwndbg.gdblib.arch
30 import pwndbg.lib.gcc
31 import pwndbg.lib.tempfile
32 from pwndbg.color import message
33
34 gcc_compiler_path = pwndbg.gdblib.config.add_param(
35 "gcc-compiler-path",
36 "",
37 "Path to the gcc/g++ toolchain for generating imported symbols",
38 )
39
40 cymbol_editor = pwndbg.gdblib.config.add_param(
41 "cymbol-editor", "", "Path to the editor for editing custom structures"
42 )
43
44 # Remeber loaded symbols. This would be useful for 'remove-symbol-file'.
45 loaded_symbols = {}
46
47 # Where generated symbol source files are saved.
48 pwndbg_cachedir = pwndbg.lib.tempfile.cachedir("custom-symbols")
49
50
51 def unload_loaded_symbol(custom_structure_name):
52 custom_structure_symbols_file = loaded_symbols.get(custom_structure_name)
53 if custom_structure_symbols_file is not None:
54 gdb.execute(f"remove-symbol-file {custom_structure_symbols_file}")
55 loaded_symbols.pop(custom_structure_name)
56
57
58 def OnlyWhenStructFileExists(func):
59 @functools.wraps(func)
60 def wrapper(custom_structure_name):
61 pwndbg_custom_structure_path = os.path.join(pwndbg_cachedir, custom_structure_name) + ".c"
62 if not os.path.exists(pwndbg_custom_structure_path):
63 print(message.error("No custom structure was found with the given name!"))
64 return
65 return func(custom_structure_name, pwndbg_custom_structure_path)
66
67 return wrapper
68
69
70 def generate_debug_symbols(custom_structure_path, pwndbg_debug_symbols_output_file=None):
71 if not pwndbg_debug_symbols_output_file:
72 _, pwndbg_debug_symbols_output_file = tempfile.mkstemp(prefix="custom-", suffix=".dbg")
73
74 # -fno-eliminate-unused-debug-types is a handy gcc flag that lets us extract debug symbols from non-used defined structures.
75 gcc_extra_flags = [
76 custom_structure_path,
77 "-c",
78 "-g",
79 "-fno-eliminate-unused-debug-types",
80 "-o",
81 pwndbg_debug_symbols_output_file,
82 ]
83
84 # TODO: implement remote debugging support.
85 gcc_flags = pwndbg.lib.gcc.which(pwndbg.gdblib.arch)
86 if gcc_compiler_path != "":
87 gcc_flags[0] = gcc_compiler_path
88
89 gcc_cmd = gcc_flags + gcc_extra_flags
90
91 try:
92 subprocess.run(gcc_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
93 except subprocess.CalledProcessError as exception:
94 print(message.error(exception))
95 print(
96 message.error(
97 "Failed to compile the .c file with custom structures. Please fix any compilation errors there may be."
98 )
99 )
100 return None
101 except Exception as exception:
102 print(message.error(exception))
103 print(message.error("An error occured while generating the debug symbols."))
104 return None
105
106 return pwndbg_debug_symbols_output_file
107
108
109 def add_custom_structure(custom_structure_name):
110 pwndbg_custom_structure_path = os.path.join(pwndbg_cachedir, custom_structure_name) + ".c"
111
112 if os.path.exists(pwndbg_custom_structure_path):
113 option = input(
114 message.notice(
115 "A custom structure was found with the given name, would you like to overwrite it? [y/n] "
116 )
117 )
118 if option != "y":
119 return
120
121 print(
122 message.notice("Enter your custom structure in a C header style, press Ctrl+D to save:\n")
123 )
124
125 custom_structures_source = sys.stdin.read().strip()
126 if custom_structures_source == "":
127 print(message.notice("An empty structure is entered, skipping ..."))
128 return
129
130 with open(pwndbg_custom_structure_path, "w") as f:
131 f.write(custom_structures_source)
132
133 # Avoid checking for file existance. Call the decorator wrapper directly.
134 load_custom_structure.__wrapped__(custom_structure_name, pwndbg_custom_structure_path)
135
136
137 @OnlyWhenStructFileExists
138 def edit_custom_structure(custom_structure_name, custom_structure_path):
139
140 # Lookup an editor to use for editing the custom structure.
141 editor_preference = os.getenv("EDITOR")
142 if not editor_preference:
143 editor_preference = os.getenv("VISUAL")
144 if not editor_preference:
145 editor_preference = "vi"
146
147 if cymbol_editor != "":
148 editor_preference = cymbol_editor
149
150 try:
151 subprocess.run(
152 [editor_preference, custom_structure_path],
153 check=True,
154 )
155 except Exception as exception:
156 print(message.error("An error occured during opening the source file."))
157 print(message.error(f"Path to the custom structure: {custom_structure_path}"))
158 print(message.error("Please try to manually edit the structure."))
159 print(
160 message.error(
161 '\nTry to set a path to an editor with:\n\tset "cymbol-editor" /usr/bin/nano'
162 )
163 )
164 return
165
166 input(message.notice("Press enter when finished editing."))
167
168 load_custom_structure(custom_structure_name)
169
170
171 @OnlyWhenStructFileExists
172 def remove_custom_structure(custom_structure_name, custom_structure_path):
173 unload_loaded_symbol(custom_structure_name)
174 os.remove(custom_structure_path)
175 print(message.success("Symbols are removed!"))
176
177
178 @OnlyWhenStructFileExists
179 def load_custom_structure(custom_structure_name, custom_structure_path):
180 unload_loaded_symbol(custom_structure_name)
181 pwndbg_debug_symbols_output_file = generate_debug_symbols(custom_structure_path)
182 if not pwndbg_debug_symbols_output_file:
183 return # generate_debug_symbols prints on failures
184 gdb.execute(f"add-symbol-file {pwndbg_debug_symbols_output_file}", to_string=True)
185 loaded_symbols[custom_structure_name] = pwndbg_debug_symbols_output_file
186 print(message.success("Symbols are loaded!"))
187
188
189 @OnlyWhenStructFileExists
190 def show_custom_structure(custom_structure_name, custom_structure_path):
191 # Call wrapper .func() to avoid memoization.
192 highlighted_source = pwndbg.pwndbg.commands.context.get_highlight_source.func(
193 custom_structure_path
194 )
195 print("\n".join(highlighted_source))
196
197
198 parser = argparse.ArgumentParser(
199 description="Add, show, load, edit, or delete custom structures in plain C"
200 )
201 parser.add_argument(
202 "-a",
203 "--add",
204 metavar="name",
205 help="Add a new custom structure",
206 default=None,
207 type=str,
208 )
209 parser.add_argument(
210 "-r",
211 "--remove",
212 metavar="name",
213 help="Remove an existing custom structure",
214 default=None,
215 type=str,
216 )
217 parser.add_argument(
218 "-e",
219 "--edit",
220 metavar="name",
221 help="Edit an existing custom structure",
222 default=None,
223 type=str,
224 )
225 parser.add_argument(
226 "-l",
227 "--load",
228 metavar="name",
229 help="Load an existing custom structure",
230 default=None,
231 type=str,
232 )
233 parser.add_argument(
234 "-s",
235 "--show",
236 metavar="name",
237 help="Show the source code of an existing custom structure",
238 default=None,
239 type=str,
240 )
241
242
243 @pwndbg.commands.ArgparsedCommand(parser)
244 @pwndbg.commands.OnlyAmd64
245 @pwndbg.commands.OnlyWhenRunning
246 def cymbol(add, remove, edit, load, show):
247 if add:
248 add_custom_structure(add)
249 elif remove:
250 remove_custom_structure(remove)
251 elif edit:
252 edit_custom_structure(edit)
253 elif load:
254 load_custom_structure(load)
255 elif show:
256 show_custom_structure(show)
257 else:
258 parser.print_help()
259
```
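One detail of the file above worth spelling out: `OnlyWhenStructFileExists` hands each subcommand the resolved `.c` path only when that file already exists, and `add_custom_structure` deliberately calls `load_custom_structure.__wrapped__(...)` to skip the check for a file it has just written (the `__wrapped__` attribute is set by `functools.wraps`). A stripped-down sketch of the same pattern, with an illustrative cache directory:

```python
import functools
import os

CACHE_DIR = "/tmp/custom-symbols"        # stand-in for pwndbg's cache directory


def only_when_struct_file_exists(func):
    @functools.wraps(func)               # also exposes the original as func.__wrapped__
    def wrapper(name):
        path = os.path.join(CACHE_DIR, name) + ".c"
        if not os.path.exists(path):
            print("No custom structure was found with the given name!")
            return
        return func(name, path)

    return wrapper


@only_when_struct_file_exists
def load_custom_structure(name, path):
    print(f"loading symbols for {name} from {path}")


load_custom_structure("foo")             # goes through the existence check
load_custom_structure.__wrapped__(       # bypasses it, as add_custom_structure does
    "foo", os.path.join(CACHE_DIR, "foo.c")
)
```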
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwndbg/commands/cymbol.py b/pwndbg/commands/cymbol.py
--- a/pwndbg/commands/cymbol.py
+++ b/pwndbg/commands/cymbol.py
@@ -181,7 +181,10 @@
pwndbg_debug_symbols_output_file = generate_debug_symbols(custom_structure_path)
if not pwndbg_debug_symbols_output_file:
return # generate_debug_symbols prints on failures
- gdb.execute(f"add-symbol-file {pwndbg_debug_symbols_output_file}", to_string=True)
+ # Old GDB versions (e.g. 8.2) requires addr argument in add-symbol-file
+ # we set that address to which to load the symbols to 0 since it doesn't matter here
+ # (because we are only loading types information)
+ gdb.execute(f"add-symbol-file {pwndbg_debug_symbols_output_file} 0", to_string=True)
loaded_symbols[custom_structure_name] = pwndbg_debug_symbols_output_file
print(message.success("Symbols are loaded!"))
| {"golden_diff": "diff --git a/pwndbg/commands/cymbol.py b/pwndbg/commands/cymbol.py\n--- a/pwndbg/commands/cymbol.py\n+++ b/pwndbg/commands/cymbol.py\n@@ -181,7 +181,10 @@\n pwndbg_debug_symbols_output_file = generate_debug_symbols(custom_structure_path)\n if not pwndbg_debug_symbols_output_file:\n return # generate_debug_symbols prints on failures\n- gdb.execute(f\"add-symbol-file {pwndbg_debug_symbols_output_file}\", to_string=True)\n+ # Old GDB versions (e.g. 8.2) requires addr argument in add-symbol-file\n+ # we set that address to which to load the symbols to 0 since it doesn't matter here\n+ # (because we are only loading types information)\n+ gdb.execute(f\"add-symbol-file {pwndbg_debug_symbols_output_file} 0\", to_string=True)\n loaded_symbols[custom_structure_name] = pwndbg_debug_symbols_output_file\n print(message.success(\"Symbols are loaded!\"))\n", "issue": "Fix cymbol command on Ubuntu 18.04\nThere's a cymbol test that fails on the Ubuntu 18.04 CI, this issue is there to track it\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAdd, load, show, edit, or delete symbols for custom structures.\n\nFor the generation of the symbols g++/gcc is being used under the hood.\n\nIn case of remote debugging a binary which is not native to your architecture it\nis advised to configure the 'gcc-config-path' config parameter to your own cross-platform\ngnu gcc compiled toolchain for your target architecture.\n\nYou are advised to configure the 'cymbol-editor' config parameter to the path of your\nfavorite text editor. Otherwise cymbol exapnds $EDITOR and $VISUAL environment variables\nto find the path to the default text editor.\n\"\"\"\n\nimport argparse\nimport functools\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nimport gdb\n\nimport pwndbg\nimport pwndbg.commands\nimport pwndbg.gdblib.arch\nimport pwndbg.lib.gcc\nimport pwndbg.lib.tempfile\nfrom pwndbg.color import message\n\ngcc_compiler_path = pwndbg.gdblib.config.add_param(\n \"gcc-compiler-path\",\n \"\",\n \"Path to the gcc/g++ toolchain for generating imported symbols\",\n)\n\ncymbol_editor = pwndbg.gdblib.config.add_param(\n \"cymbol-editor\", \"\", \"Path to the editor for editing custom structures\"\n)\n\n# Remeber loaded symbols. 
This would be useful for 'remove-symbol-file'.\nloaded_symbols = {}\n\n# Where generated symbol source files are saved.\npwndbg_cachedir = pwndbg.lib.tempfile.cachedir(\"custom-symbols\")\n\n\ndef unload_loaded_symbol(custom_structure_name):\n custom_structure_symbols_file = loaded_symbols.get(custom_structure_name)\n if custom_structure_symbols_file is not None:\n gdb.execute(f\"remove-symbol-file {custom_structure_symbols_file}\")\n loaded_symbols.pop(custom_structure_name)\n\n\ndef OnlyWhenStructFileExists(func):\n @functools.wraps(func)\n def wrapper(custom_structure_name):\n pwndbg_custom_structure_path = os.path.join(pwndbg_cachedir, custom_structure_name) + \".c\"\n if not os.path.exists(pwndbg_custom_structure_path):\n print(message.error(\"No custom structure was found with the given name!\"))\n return\n return func(custom_structure_name, pwndbg_custom_structure_path)\n\n return wrapper\n\n\ndef generate_debug_symbols(custom_structure_path, pwndbg_debug_symbols_output_file=None):\n if not pwndbg_debug_symbols_output_file:\n _, pwndbg_debug_symbols_output_file = tempfile.mkstemp(prefix=\"custom-\", suffix=\".dbg\")\n\n # -fno-eliminate-unused-debug-types is a handy gcc flag that lets us extract debug symbols from non-used defined structures.\n gcc_extra_flags = [\n custom_structure_path,\n \"-c\",\n \"-g\",\n \"-fno-eliminate-unused-debug-types\",\n \"-o\",\n pwndbg_debug_symbols_output_file,\n ]\n\n # TODO: implement remote debugging support.\n gcc_flags = pwndbg.lib.gcc.which(pwndbg.gdblib.arch)\n if gcc_compiler_path != \"\":\n gcc_flags[0] = gcc_compiler_path\n\n gcc_cmd = gcc_flags + gcc_extra_flags\n\n try:\n subprocess.run(gcc_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)\n except subprocess.CalledProcessError as exception:\n print(message.error(exception))\n print(\n message.error(\n \"Failed to compile the .c file with custom structures. Please fix any compilation errors there may be.\"\n )\n )\n return None\n except Exception as exception:\n print(message.error(exception))\n print(message.error(\"An error occured while generating the debug symbols.\"))\n return None\n\n return pwndbg_debug_symbols_output_file\n\n\ndef add_custom_structure(custom_structure_name):\n pwndbg_custom_structure_path = os.path.join(pwndbg_cachedir, custom_structure_name) + \".c\"\n\n if os.path.exists(pwndbg_custom_structure_path):\n option = input(\n message.notice(\n \"A custom structure was found with the given name, would you like to overwrite it? [y/n] \"\n )\n )\n if option != \"y\":\n return\n\n print(\n message.notice(\"Enter your custom structure in a C header style, press Ctrl+D to save:\\n\")\n )\n\n custom_structures_source = sys.stdin.read().strip()\n if custom_structures_source == \"\":\n print(message.notice(\"An empty structure is entered, skipping ...\"))\n return\n\n with open(pwndbg_custom_structure_path, \"w\") as f:\n f.write(custom_structures_source)\n\n # Avoid checking for file existance. 
Call the decorator wrapper directly.\n load_custom_structure.__wrapped__(custom_structure_name, pwndbg_custom_structure_path)\n\n\n@OnlyWhenStructFileExists\ndef edit_custom_structure(custom_structure_name, custom_structure_path):\n\n # Lookup an editor to use for editing the custom structure.\n editor_preference = os.getenv(\"EDITOR\")\n if not editor_preference:\n editor_preference = os.getenv(\"VISUAL\")\n if not editor_preference:\n editor_preference = \"vi\"\n\n if cymbol_editor != \"\":\n editor_preference = cymbol_editor\n\n try:\n subprocess.run(\n [editor_preference, custom_structure_path],\n check=True,\n )\n except Exception as exception:\n print(message.error(\"An error occured during opening the source file.\"))\n print(message.error(f\"Path to the custom structure: {custom_structure_path}\"))\n print(message.error(\"Please try to manually edit the structure.\"))\n print(\n message.error(\n '\\nTry to set a path to an editor with:\\n\\tset \"cymbol-editor\" /usr/bin/nano'\n )\n )\n return\n\n input(message.notice(\"Press enter when finished editing.\"))\n\n load_custom_structure(custom_structure_name)\n\n\n@OnlyWhenStructFileExists\ndef remove_custom_structure(custom_structure_name, custom_structure_path):\n unload_loaded_symbol(custom_structure_name)\n os.remove(custom_structure_path)\n print(message.success(\"Symbols are removed!\"))\n\n\n@OnlyWhenStructFileExists\ndef load_custom_structure(custom_structure_name, custom_structure_path):\n unload_loaded_symbol(custom_structure_name)\n pwndbg_debug_symbols_output_file = generate_debug_symbols(custom_structure_path)\n if not pwndbg_debug_symbols_output_file:\n return # generate_debug_symbols prints on failures\n gdb.execute(f\"add-symbol-file {pwndbg_debug_symbols_output_file}\", to_string=True)\n loaded_symbols[custom_structure_name] = pwndbg_debug_symbols_output_file\n print(message.success(\"Symbols are loaded!\"))\n\n\n@OnlyWhenStructFileExists\ndef show_custom_structure(custom_structure_name, custom_structure_path):\n # Call wrapper .func() to avoid memoization.\n highlighted_source = pwndbg.pwndbg.commands.context.get_highlight_source.func(\n custom_structure_path\n )\n print(\"\\n\".join(highlighted_source))\n\n\nparser = argparse.ArgumentParser(\n description=\"Add, show, load, edit, or delete custom structures in plain C\"\n)\nparser.add_argument(\n \"-a\",\n \"--add\",\n metavar=\"name\",\n help=\"Add a new custom structure\",\n default=None,\n type=str,\n)\nparser.add_argument(\n \"-r\",\n \"--remove\",\n metavar=\"name\",\n help=\"Remove an existing custom structure\",\n default=None,\n type=str,\n)\nparser.add_argument(\n \"-e\",\n \"--edit\",\n metavar=\"name\",\n help=\"Edit an existing custom structure\",\n default=None,\n type=str,\n)\nparser.add_argument(\n \"-l\",\n \"--load\",\n metavar=\"name\",\n help=\"Load an existing custom structure\",\n default=None,\n type=str,\n)\nparser.add_argument(\n \"-s\",\n \"--show\",\n metavar=\"name\",\n help=\"Show the source code of an existing custom structure\",\n default=None,\n type=str,\n)\n\n\[email protected](parser)\[email protected]\[email protected]\ndef cymbol(add, remove, edit, load, show):\n if add:\n add_custom_structure(add)\n elif remove:\n remove_custom_structure(remove)\n elif edit:\n edit_custom_structure(edit)\n elif load:\n load_custom_structure(load)\n elif show:\n show_custom_structure(show)\n else:\n parser.print_help()\n", "path": "pwndbg/commands/cymbol.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 
-*-\n\n\"\"\"\nAdd, load, show, edit, or delete symbols for custom structures.\n\nFor the generation of the symbols g++/gcc is being used under the hood.\n\nIn case of remote debugging a binary which is not native to your architecture it\nis advised to configure the 'gcc-config-path' config parameter to your own cross-platform\ngnu gcc compiled toolchain for your target architecture.\n\nYou are advised to configure the 'cymbol-editor' config parameter to the path of your\nfavorite text editor. Otherwise cymbol exapnds $EDITOR and $VISUAL environment variables\nto find the path to the default text editor.\n\"\"\"\n\nimport argparse\nimport functools\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nimport gdb\n\nimport pwndbg\nimport pwndbg.commands\nimport pwndbg.gdblib.arch\nimport pwndbg.lib.gcc\nimport pwndbg.lib.tempfile\nfrom pwndbg.color import message\n\ngcc_compiler_path = pwndbg.gdblib.config.add_param(\n \"gcc-compiler-path\",\n \"\",\n \"Path to the gcc/g++ toolchain for generating imported symbols\",\n)\n\ncymbol_editor = pwndbg.gdblib.config.add_param(\n \"cymbol-editor\", \"\", \"Path to the editor for editing custom structures\"\n)\n\n# Remeber loaded symbols. This would be useful for 'remove-symbol-file'.\nloaded_symbols = {}\n\n# Where generated symbol source files are saved.\npwndbg_cachedir = pwndbg.lib.tempfile.cachedir(\"custom-symbols\")\n\n\ndef unload_loaded_symbol(custom_structure_name):\n custom_structure_symbols_file = loaded_symbols.get(custom_structure_name)\n if custom_structure_symbols_file is not None:\n gdb.execute(f\"remove-symbol-file {custom_structure_symbols_file}\")\n loaded_symbols.pop(custom_structure_name)\n\n\ndef OnlyWhenStructFileExists(func):\n @functools.wraps(func)\n def wrapper(custom_structure_name):\n pwndbg_custom_structure_path = os.path.join(pwndbg_cachedir, custom_structure_name) + \".c\"\n if not os.path.exists(pwndbg_custom_structure_path):\n print(message.error(\"No custom structure was found with the given name!\"))\n return\n return func(custom_structure_name, pwndbg_custom_structure_path)\n\n return wrapper\n\n\ndef generate_debug_symbols(custom_structure_path, pwndbg_debug_symbols_output_file=None):\n if not pwndbg_debug_symbols_output_file:\n _, pwndbg_debug_symbols_output_file = tempfile.mkstemp(prefix=\"custom-\", suffix=\".dbg\")\n\n # -fno-eliminate-unused-debug-types is a handy gcc flag that lets us extract debug symbols from non-used defined structures.\n gcc_extra_flags = [\n custom_structure_path,\n \"-c\",\n \"-g\",\n \"-fno-eliminate-unused-debug-types\",\n \"-o\",\n pwndbg_debug_symbols_output_file,\n ]\n\n # TODO: implement remote debugging support.\n gcc_flags = pwndbg.lib.gcc.which(pwndbg.gdblib.arch)\n if gcc_compiler_path != \"\":\n gcc_flags[0] = gcc_compiler_path\n\n gcc_cmd = gcc_flags + gcc_extra_flags\n\n try:\n subprocess.run(gcc_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)\n except subprocess.CalledProcessError as exception:\n print(message.error(exception))\n print(\n message.error(\n \"Failed to compile the .c file with custom structures. 
Please fix any compilation errors there may be.\"\n )\n )\n return None\n except Exception as exception:\n print(message.error(exception))\n print(message.error(\"An error occured while generating the debug symbols.\"))\n return None\n\n return pwndbg_debug_symbols_output_file\n\n\ndef add_custom_structure(custom_structure_name):\n pwndbg_custom_structure_path = os.path.join(pwndbg_cachedir, custom_structure_name) + \".c\"\n\n if os.path.exists(pwndbg_custom_structure_path):\n option = input(\n message.notice(\n \"A custom structure was found with the given name, would you like to overwrite it? [y/n] \"\n )\n )\n if option != \"y\":\n return\n\n print(\n message.notice(\"Enter your custom structure in a C header style, press Ctrl+D to save:\\n\")\n )\n\n custom_structures_source = sys.stdin.read().strip()\n if custom_structures_source == \"\":\n print(message.notice(\"An empty structure is entered, skipping ...\"))\n return\n\n with open(pwndbg_custom_structure_path, \"w\") as f:\n f.write(custom_structures_source)\n\n # Avoid checking for file existance. Call the decorator wrapper directly.\n load_custom_structure.__wrapped__(custom_structure_name, pwndbg_custom_structure_path)\n\n\n@OnlyWhenStructFileExists\ndef edit_custom_structure(custom_structure_name, custom_structure_path):\n\n # Lookup an editor to use for editing the custom structure.\n editor_preference = os.getenv(\"EDITOR\")\n if not editor_preference:\n editor_preference = os.getenv(\"VISUAL\")\n if not editor_preference:\n editor_preference = \"vi\"\n\n if cymbol_editor != \"\":\n editor_preference = cymbol_editor\n\n try:\n subprocess.run(\n [editor_preference, custom_structure_path],\n check=True,\n )\n except Exception as exception:\n print(message.error(\"An error occured during opening the source file.\"))\n print(message.error(f\"Path to the custom structure: {custom_structure_path}\"))\n print(message.error(\"Please try to manually edit the structure.\"))\n print(\n message.error(\n '\\nTry to set a path to an editor with:\\n\\tset \"cymbol-editor\" /usr/bin/nano'\n )\n )\n return\n\n input(message.notice(\"Press enter when finished editing.\"))\n\n load_custom_structure(custom_structure_name)\n\n\n@OnlyWhenStructFileExists\ndef remove_custom_structure(custom_structure_name, custom_structure_path):\n unload_loaded_symbol(custom_structure_name)\n os.remove(custom_structure_path)\n print(message.success(\"Symbols are removed!\"))\n\n\n@OnlyWhenStructFileExists\ndef load_custom_structure(custom_structure_name, custom_structure_path):\n unload_loaded_symbol(custom_structure_name)\n pwndbg_debug_symbols_output_file = generate_debug_symbols(custom_structure_path)\n if not pwndbg_debug_symbols_output_file:\n return # generate_debug_symbols prints on failures\n # Old GDB versions (e.g. 
8.2) requires addr argument in add-symbol-file\n # we set that address to which to load the symbols to 0 since it doesn't matter here\n # (because we are only loading types information)\n gdb.execute(f\"add-symbol-file {pwndbg_debug_symbols_output_file} 0\", to_string=True)\n loaded_symbols[custom_structure_name] = pwndbg_debug_symbols_output_file\n print(message.success(\"Symbols are loaded!\"))\n\n\n@OnlyWhenStructFileExists\ndef show_custom_structure(custom_structure_name, custom_structure_path):\n # Call wrapper .func() to avoid memoization.\n highlighted_source = pwndbg.pwndbg.commands.context.get_highlight_source.func(\n custom_structure_path\n )\n print(\"\\n\".join(highlighted_source))\n\n\nparser = argparse.ArgumentParser(\n description=\"Add, show, load, edit, or delete custom structures in plain C\"\n)\nparser.add_argument(\n \"-a\",\n \"--add\",\n metavar=\"name\",\n help=\"Add a new custom structure\",\n default=None,\n type=str,\n)\nparser.add_argument(\n \"-r\",\n \"--remove\",\n metavar=\"name\",\n help=\"Remove an existing custom structure\",\n default=None,\n type=str,\n)\nparser.add_argument(\n \"-e\",\n \"--edit\",\n metavar=\"name\",\n help=\"Edit an existing custom structure\",\n default=None,\n type=str,\n)\nparser.add_argument(\n \"-l\",\n \"--load\",\n metavar=\"name\",\n help=\"Load an existing custom structure\",\n default=None,\n type=str,\n)\nparser.add_argument(\n \"-s\",\n \"--show\",\n metavar=\"name\",\n help=\"Show the source code of an existing custom structure\",\n default=None,\n type=str,\n)\n\n\[email protected](parser)\[email protected]\[email protected]\ndef cymbol(add, remove, edit, load, show):\n if add:\n add_custom_structure(add)\n elif remove:\n remove_custom_structure(remove)\n elif edit:\n edit_custom_structure(edit)\n elif load:\n load_custom_structure(load)\n elif show:\n show_custom_structure(show)\n else:\n parser.print_help()\n", "path": "pwndbg/commands/cymbol.py"}]} | 2,758 | 230 |
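The comment added by the patch above points at the root cause: the GDB 8.2 that ships with Ubuntu 18.04 still treats the address argument of `add-symbol-file` as mandatory, while newer GDBs accept the file alone, and passing a dummy `0` is harmless here because the file is only used as a source of type information. For comparison, a hedged sketch of an alternative fallback approach (not the fix that was applied, which simply always passes `0`), assuming GDB's Python API (`gdb.execute`, `gdb.error`):

```python
import gdb


def add_symbol_file_compat(symbols_path):
    """Load a symbol file whether or not this GDB requires an explicit address."""
    try:
        # Newer GDBs accept the file on its own.
        gdb.execute(f"add-symbol-file {symbols_path}", to_string=True)
    except gdb.error:
        # GDB 8.2 (Ubuntu 18.04) insists on an address; 0 is fine when the file is
        # only loaded for its type information.
        gdb.execute(f"add-symbol-file {symbols_path} 0", to_string=True)
```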
gh_patches_debug_7509 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-3095 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: 'coroutine' object has no attribute 'add_done_callback'
### Which version of dd-trace-py are you using?
ddtrace 0.57.1
### Which version of pip are you using?
pip 21.3.1
### Which version of the libraries are you using?
aioredis 1.3.1
### What is the result that you get?
```
File "aioredis/commands/list.py", line 97, in lrange
return self.execute(b'LRANGE', key, start, stop, encoding=encoding)
File "ddtrace/contrib/aioredis/patch.py", line 147, in traced_13_execute_command
task.add_done_callback(_finish_span)
```
### What is the result that you expected?
No errors
--- END ISSUE ---
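The traceback above is the classic symptom of treating an arbitrary awaitable as a `Future`: in aioredis 1.3.1, `Redis.execute` can hand back a plain coroutine (for example when the pool has no free connection), and coroutines do not implement `add_done_callback`. A minimal, library-free sketch of both the failure and the usual remedy, `asyncio.ensure_future`; the `fake_execute` coroutine stands in for the aioredis call:

```python
import asyncio


async def fake_execute():
    return b"OK"


async def main():
    coro = fake_execute()
    print(hasattr(coro, "add_done_callback"))     # False -> the AttributeError above

    task = asyncio.ensure_future(fake_execute())  # schedules it and returns a Task
    task.add_done_callback(lambda fut: print("done:", fut.result()))
    await task

    coro.close()  # silence the "coroutine was never awaited" warning


asyncio.run(main())
```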
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/contrib/aioredis/patch.py`
Content:
```
1 import sys
2
3 import aioredis
4
5 from ddtrace import config
6 from ddtrace.internal.utils.wrappers import unwrap as _u
7 from ddtrace.pin import Pin
8 from ddtrace.vendor.wrapt import wrap_function_wrapper as _w
9
10 from .. import trace_utils
11 from ...constants import ANALYTICS_SAMPLE_RATE_KEY
12 from ...constants import SPAN_MEASURED_KEY
13 from ...ext import SpanTypes
14 from ...ext import net
15 from ...ext import redis as redisx
16 from ..redis.util import _trace_redis_cmd
17 from ..redis.util import _trace_redis_execute_pipeline
18 from ..redis.util import format_command_args
19
20
21 try:
22 from aioredis.commands.transaction import _RedisBuffer
23 except ImportError:
24 _RedisBuffer = None
25
26 config._add("aioredis", dict(_default_service="redis"))
27
28 aioredis_version_str = getattr(aioredis, "__version__", "0.0.0")
29 aioredis_version = tuple([int(i) for i in aioredis_version_str.split(".")])
30
31
32 def patch():
33 if getattr(aioredis, "_datadog_patch", False):
34 return
35 setattr(aioredis, "_datadog_patch", True)
36 pin = Pin()
37 if aioredis_version >= (2, 0):
38 _w("aioredis.client", "Redis.execute_command", traced_execute_command)
39 _w("aioredis.client", "Redis.pipeline", traced_pipeline)
40 _w("aioredis.client", "Pipeline.execute", traced_execute_pipeline)
41 pin.onto(aioredis.client.Redis)
42 else:
43 _w("aioredis", "Redis.execute", traced_13_execute_command)
44 _w("aioredis", "Redis.pipeline", traced_13_pipeline)
45 _w("aioredis.commands.transaction", "Pipeline.execute", traced_13_execute_pipeline)
46 pin.onto(aioredis.Redis)
47
48
49 def unpatch():
50 if not getattr(aioredis, "_datadog_patch", False):
51 return
52
53 setattr(aioredis, "_datadog_patch", False)
54 if aioredis_version >= (2, 0):
55 _u(aioredis.client.Redis, "execute_command")
56 _u(aioredis.client.Redis, "pipeline")
57 _u(aioredis.client.Pipeline, "execute")
58 else:
59 _u(aioredis.Redis, "execute")
60 _u(aioredis.Redis, "pipeline")
61 _u(aioredis.commands.transaction.Pipeline, "execute")
62
63
64 async def traced_execute_command(func, instance, args, kwargs):
65 pin = Pin.get_from(instance)
66 if not pin or not pin.enabled():
67 return await func(*args, **kwargs)
68
69 with _trace_redis_cmd(pin, config.aioredis, instance, args):
70 return await func(*args, **kwargs)
71
72
73 def traced_pipeline(func, instance, args, kwargs):
74 pipeline = func(*args, **kwargs)
75 pin = Pin.get_from(instance)
76 if pin:
77 pin.onto(pipeline)
78 return pipeline
79
80
81 async def traced_execute_pipeline(func, instance, args, kwargs):
82 pin = Pin.get_from(instance)
83 if not pin or not pin.enabled():
84 return await func(*args, **kwargs)
85
86 cmds = [format_command_args(c) for c, _ in instance.command_stack]
87 resource = "\n".join(cmds)
88 with _trace_redis_execute_pipeline(pin, config.aioredis, resource, instance):
89 return await func(*args, **kwargs)
90
91
92 def traced_13_pipeline(func, instance, args, kwargs):
93 pipeline = func(*args, **kwargs)
94 pin = Pin.get_from(instance)
95 if pin:
96 pin.onto(pipeline)
97 return pipeline
98
99
100 def traced_13_execute_command(func, instance, args, kwargs):
101 # If we have a _RedisBuffer then we are in a pipeline
102 if isinstance(instance.connection, _RedisBuffer):
103 return func(*args, **kwargs)
104
105 pin = Pin.get_from(instance)
106 if not pin or not pin.enabled():
107 return func(*args, **kwargs)
108
109 # Don't activate the span since this operation is performed as a future which concludes sometime later on in
110 # execution so subsequent operations in the stack are not necessarily semantically related
111 # (we don't want this span to be the parent of all other spans created before the future is resolved)
112 span = pin.tracer.start_span(
113 redisx.CMD, service=trace_utils.ext_service(pin, config.aioredis), span_type=SpanTypes.REDIS, activate=False
114 )
115
116 span.set_tag(SPAN_MEASURED_KEY)
117 query = format_command_args(args)
118 span.resource = query
119 span.set_tag(redisx.RAWCMD, query)
120 if pin.tags:
121 span.set_tags(pin.tags)
122
123 span.set_tags(
124 {
125 net.TARGET_HOST: instance.address[0],
126 net.TARGET_PORT: instance.address[1],
127 redisx.DB: instance.db or 0,
128 }
129 )
130 span.set_metric(redisx.ARGS_LEN, len(args))
131 # set analytics sample rate if enabled
132 span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())
133
134 def _finish_span(future):
135 try:
136 # Accessing the result will raise an exception if:
137 # - The future was cancelled
138 # - There was an error executing the future (`future.exception()`)
139 # - The future is in an invalid state
140 future.result()
141 except Exception:
142 span.set_exc_info(*sys.exc_info())
143 finally:
144 span.finish()
145
146 task = func(*args, **kwargs)
147 task.add_done_callback(_finish_span)
148 return task
149
150
151 async def traced_13_execute_pipeline(func, instance, args, kwargs):
152 pin = Pin.get_from(instance)
153 if not pin or not pin.enabled():
154 return await func(*args, **kwargs)
155
156 cmds = []
157 for _, cmd, cmd_args, _ in instance._pipeline:
158 parts = [cmd]
159 parts.extend(cmd_args)
160 cmds.append(format_command_args(parts))
161 resource = "\n".join(cmds)
162 with pin.tracer.trace(
163 redisx.CMD,
164 resource=resource,
165 service=trace_utils.ext_service(pin, config.aioredis),
166 span_type=SpanTypes.REDIS,
167 ) as span:
168
169 span.set_tags(
170 {
171 net.TARGET_HOST: instance._pool_or_conn.address[0],
172 net.TARGET_PORT: instance._pool_or_conn.address[1],
173 redisx.DB: instance._pool_or_conn.db or 0,
174 }
175 )
176
177 span.set_tag(SPAN_MEASURED_KEY)
178 span.set_tag(redisx.RAWCMD, resource)
179 span.set_metric(redisx.PIPELINE_LEN, len(instance._pipeline))
180 # set analytics sample rate if enabled
181 span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())
182
183 return await func(*args, **kwargs)
184
```
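The relevant piece of the file above is `traced_13_execute_command`: it starts a span without activating it and relies on `add_done_callback` to close that span once the Redis command resolves, which only works when `func(*args, **kwargs)` returns a `Future`/`Task`. The patch further down normalises the return value with `asyncio.ensure_future` so the callback can always be attached. A sketch of that "measure an awaitable via a done callback" pattern, using a simple timer in place of a tracing span and a stand-in coroutine instead of aioredis:

```python
import asyncio
import sys
import time


def measure(awaitable, label):
    start = time.perf_counter()

    def _finish(fut):
        try:
            fut.result()                    # re-raises if the command failed
        except Exception:
            print(label, "failed:", sys.exc_info()[1])
        finally:
            print(f"{label} took {time.perf_counter() - start:.6f}s")

    task = asyncio.ensure_future(awaitable)  # accepts coroutines and Futures alike
    task.add_done_callback(_finish)
    return task                              # the caller can still await the result


async def fake_redis_lrange():
    await asyncio.sleep(0)
    return [b"a", b"b"]


async def main():
    print(await measure(fake_redis_lrange(), "LRANGE"))


asyncio.run(main())
```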
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ddtrace/contrib/aioredis/patch.py b/ddtrace/contrib/aioredis/patch.py
--- a/ddtrace/contrib/aioredis/patch.py
+++ b/ddtrace/contrib/aioredis/patch.py
@@ -1,3 +1,4 @@
+import asyncio
import sys
import aioredis
@@ -144,6 +145,9 @@
span.finish()
task = func(*args, **kwargs)
+ # Execute command returns a coroutine when no free connections are available
+ # https://github.com/aio-libs/aioredis-py/blob/v1.3.1/aioredis/pool.py#L191
+ task = asyncio.ensure_future(task)
task.add_done_callback(_finish_span)
return task
| {"golden_diff": "diff --git a/ddtrace/contrib/aioredis/patch.py b/ddtrace/contrib/aioredis/patch.py\n--- a/ddtrace/contrib/aioredis/patch.py\n+++ b/ddtrace/contrib/aioredis/patch.py\n@@ -1,3 +1,4 @@\n+import asyncio\n import sys\n \n import aioredis\n@@ -144,6 +145,9 @@\n span.finish()\n \n task = func(*args, **kwargs)\n+ # Execute command returns a coroutine when no free connections are available\n+ # https://github.com/aio-libs/aioredis-py/blob/v1.3.1/aioredis/pool.py#L191\n+ task = asyncio.ensure_future(task)\n task.add_done_callback(_finish_span)\n return task\n", "issue": "AttributeError: 'coroutine' object has no attribute 'add_done_callback'\n\r\n### Which version of dd-trace-py are you using?\r\n\r\nddtrace 0.57.1\r\n\r\n### Which version of pip are you using?\r\n\r\npip 21.3.1\r\n\r\n### Which version of the libraries are you using?\r\n\r\naioredis 1.3.1\r\n\r\n### What is the result that you get?\r\n\r\n```\r\n File \"aioredis/commands/list.py\", line 97, in lrange\r\n return self.execute(b'LRANGE', key, start, stop, encoding=encoding)\r\n File \"ddtrace/contrib/aioredis/patch.py\", line 147, in traced_13_execute_command\r\n task.add_done_callback(_finish_span)\r\n```\r\n\r\n### What is the result that you expected?\r\n\r\nNo errors\r\n\r\n\n", "before_files": [{"content": "import sys\n\nimport aioredis\n\nfrom ddtrace import config\nfrom ddtrace.internal.utils.wrappers import unwrap as _u\nfrom ddtrace.pin import Pin\nfrom ddtrace.vendor.wrapt import wrap_function_wrapper as _w\n\nfrom .. import trace_utils\nfrom ...constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ...constants import SPAN_MEASURED_KEY\nfrom ...ext import SpanTypes\nfrom ...ext import net\nfrom ...ext import redis as redisx\nfrom ..redis.util import _trace_redis_cmd\nfrom ..redis.util import _trace_redis_execute_pipeline\nfrom ..redis.util import format_command_args\n\n\ntry:\n from aioredis.commands.transaction import _RedisBuffer\nexcept ImportError:\n _RedisBuffer = None\n\nconfig._add(\"aioredis\", dict(_default_service=\"redis\"))\n\naioredis_version_str = getattr(aioredis, \"__version__\", \"0.0.0\")\naioredis_version = tuple([int(i) for i in aioredis_version_str.split(\".\")])\n\n\ndef patch():\n if getattr(aioredis, \"_datadog_patch\", False):\n return\n setattr(aioredis, \"_datadog_patch\", True)\n pin = Pin()\n if aioredis_version >= (2, 0):\n _w(\"aioredis.client\", \"Redis.execute_command\", traced_execute_command)\n _w(\"aioredis.client\", \"Redis.pipeline\", traced_pipeline)\n _w(\"aioredis.client\", \"Pipeline.execute\", traced_execute_pipeline)\n pin.onto(aioredis.client.Redis)\n else:\n _w(\"aioredis\", \"Redis.execute\", traced_13_execute_command)\n _w(\"aioredis\", \"Redis.pipeline\", traced_13_pipeline)\n _w(\"aioredis.commands.transaction\", \"Pipeline.execute\", traced_13_execute_pipeline)\n pin.onto(aioredis.Redis)\n\n\ndef unpatch():\n if not getattr(aioredis, \"_datadog_patch\", False):\n return\n\n setattr(aioredis, \"_datadog_patch\", False)\n if aioredis_version >= (2, 0):\n _u(aioredis.client.Redis, \"execute_command\")\n _u(aioredis.client.Redis, \"pipeline\")\n _u(aioredis.client.Pipeline, \"execute\")\n else:\n _u(aioredis.Redis, \"execute\")\n _u(aioredis.Redis, \"pipeline\")\n _u(aioredis.commands.transaction.Pipeline, \"execute\")\n\n\nasync def traced_execute_command(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n with _trace_redis_cmd(pin, config.aioredis, instance, args):\n return 
await func(*args, **kwargs)\n\n\ndef traced_pipeline(func, instance, args, kwargs):\n pipeline = func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n return pipeline\n\n\nasync def traced_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n cmds = [format_command_args(c) for c, _ in instance.command_stack]\n resource = \"\\n\".join(cmds)\n with _trace_redis_execute_pipeline(pin, config.aioredis, resource, instance):\n return await func(*args, **kwargs)\n\n\ndef traced_13_pipeline(func, instance, args, kwargs):\n pipeline = func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n return pipeline\n\n\ndef traced_13_execute_command(func, instance, args, kwargs):\n # If we have a _RedisBuffer then we are in a pipeline\n if isinstance(instance.connection, _RedisBuffer):\n return func(*args, **kwargs)\n\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n # Don't activate the span since this operation is performed as a future which concludes sometime later on in\n # execution so subsequent operations in the stack are not necessarily semantically related\n # (we don't want this span to be the parent of all other spans created before the future is resolved)\n span = pin.tracer.start_span(\n redisx.CMD, service=trace_utils.ext_service(pin, config.aioredis), span_type=SpanTypes.REDIS, activate=False\n )\n\n span.set_tag(SPAN_MEASURED_KEY)\n query = format_command_args(args)\n span.resource = query\n span.set_tag(redisx.RAWCMD, query)\n if pin.tags:\n span.set_tags(pin.tags)\n\n span.set_tags(\n {\n net.TARGET_HOST: instance.address[0],\n net.TARGET_PORT: instance.address[1],\n redisx.DB: instance.db or 0,\n }\n )\n span.set_metric(redisx.ARGS_LEN, len(args))\n # set analytics sample rate if enabled\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())\n\n def _finish_span(future):\n try:\n # Accessing the result will raise an exception if:\n # - The future was cancelled\n # - There was an error executing the future (`future.exception()`)\n # - The future is in an invalid state\n future.result()\n except Exception:\n span.set_exc_info(*sys.exc_info())\n finally:\n span.finish()\n\n task = func(*args, **kwargs)\n task.add_done_callback(_finish_span)\n return task\n\n\nasync def traced_13_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n cmds = []\n for _, cmd, cmd_args, _ in instance._pipeline:\n parts = [cmd]\n parts.extend(cmd_args)\n cmds.append(format_command_args(parts))\n resource = \"\\n\".join(cmds)\n with pin.tracer.trace(\n redisx.CMD,\n resource=resource,\n service=trace_utils.ext_service(pin, config.aioredis),\n span_type=SpanTypes.REDIS,\n ) as span:\n\n span.set_tags(\n {\n net.TARGET_HOST: instance._pool_or_conn.address[0],\n net.TARGET_PORT: instance._pool_or_conn.address[1],\n redisx.DB: instance._pool_or_conn.db or 0,\n }\n )\n\n span.set_tag(SPAN_MEASURED_KEY)\n span.set_tag(redisx.RAWCMD, resource)\n span.set_metric(redisx.PIPELINE_LEN, len(instance._pipeline))\n # set analytics sample rate if enabled\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())\n\n return await func(*args, **kwargs)\n", "path": "ddtrace/contrib/aioredis/patch.py"}], "after_files": [{"content": "import asyncio\nimport 
sys\n\nimport aioredis\n\nfrom ddtrace import config\nfrom ddtrace.internal.utils.wrappers import unwrap as _u\nfrom ddtrace.pin import Pin\nfrom ddtrace.vendor.wrapt import wrap_function_wrapper as _w\n\nfrom .. import trace_utils\nfrom ...constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ...constants import SPAN_MEASURED_KEY\nfrom ...ext import SpanTypes\nfrom ...ext import net\nfrom ...ext import redis as redisx\nfrom ..redis.util import _trace_redis_cmd\nfrom ..redis.util import _trace_redis_execute_pipeline\nfrom ..redis.util import format_command_args\n\n\ntry:\n from aioredis.commands.transaction import _RedisBuffer\nexcept ImportError:\n _RedisBuffer = None\n\nconfig._add(\"aioredis\", dict(_default_service=\"redis\"))\n\naioredis_version_str = getattr(aioredis, \"__version__\", \"0.0.0\")\naioredis_version = tuple([int(i) for i in aioredis_version_str.split(\".\")])\n\n\ndef patch():\n if getattr(aioredis, \"_datadog_patch\", False):\n return\n setattr(aioredis, \"_datadog_patch\", True)\n pin = Pin()\n if aioredis_version >= (2, 0):\n _w(\"aioredis.client\", \"Redis.execute_command\", traced_execute_command)\n _w(\"aioredis.client\", \"Redis.pipeline\", traced_pipeline)\n _w(\"aioredis.client\", \"Pipeline.execute\", traced_execute_pipeline)\n pin.onto(aioredis.client.Redis)\n else:\n _w(\"aioredis\", \"Redis.execute\", traced_13_execute_command)\n _w(\"aioredis\", \"Redis.pipeline\", traced_13_pipeline)\n _w(\"aioredis.commands.transaction\", \"Pipeline.execute\", traced_13_execute_pipeline)\n pin.onto(aioredis.Redis)\n\n\ndef unpatch():\n if not getattr(aioredis, \"_datadog_patch\", False):\n return\n\n setattr(aioredis, \"_datadog_patch\", False)\n if aioredis_version >= (2, 0):\n _u(aioredis.client.Redis, \"execute_command\")\n _u(aioredis.client.Redis, \"pipeline\")\n _u(aioredis.client.Pipeline, \"execute\")\n else:\n _u(aioredis.Redis, \"execute\")\n _u(aioredis.Redis, \"pipeline\")\n _u(aioredis.commands.transaction.Pipeline, \"execute\")\n\n\nasync def traced_execute_command(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n with _trace_redis_cmd(pin, config.aioredis, instance, args):\n return await func(*args, **kwargs)\n\n\ndef traced_pipeline(func, instance, args, kwargs):\n pipeline = func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n return pipeline\n\n\nasync def traced_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n cmds = [format_command_args(c) for c, _ in instance.command_stack]\n resource = \"\\n\".join(cmds)\n with _trace_redis_execute_pipeline(pin, config.aioredis, resource, instance):\n return await func(*args, **kwargs)\n\n\ndef traced_13_pipeline(func, instance, args, kwargs):\n pipeline = func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n return pipeline\n\n\ndef traced_13_execute_command(func, instance, args, kwargs):\n # If we have a _RedisBuffer then we are in a pipeline\n if isinstance(instance.connection, _RedisBuffer):\n return func(*args, **kwargs)\n\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n # Don't activate the span since this operation is performed as a future which concludes sometime later on in\n # execution so subsequent operations in the stack are not necessarily semantically related\n # (we don't want this span to be the 
parent of all other spans created before the future is resolved)\n span = pin.tracer.start_span(\n redisx.CMD, service=trace_utils.ext_service(pin, config.aioredis), span_type=SpanTypes.REDIS, activate=False\n )\n\n span.set_tag(SPAN_MEASURED_KEY)\n query = format_command_args(args)\n span.resource = query\n span.set_tag(redisx.RAWCMD, query)\n if pin.tags:\n span.set_tags(pin.tags)\n\n span.set_tags(\n {\n net.TARGET_HOST: instance.address[0],\n net.TARGET_PORT: instance.address[1],\n redisx.DB: instance.db or 0,\n }\n )\n span.set_metric(redisx.ARGS_LEN, len(args))\n # set analytics sample rate if enabled\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())\n\n def _finish_span(future):\n try:\n # Accessing the result will raise an exception if:\n # - The future was cancelled\n # - There was an error executing the future (`future.exception()`)\n # - The future is in an invalid state\n future.result()\n except Exception:\n span.set_exc_info(*sys.exc_info())\n finally:\n span.finish()\n\n task = func(*args, **kwargs)\n # Execute command returns a coroutine when no free connections are available\n # https://github.com/aio-libs/aioredis-py/blob/v1.3.1/aioredis/pool.py#L191\n task = asyncio.ensure_future(task)\n task.add_done_callback(_finish_span)\n return task\n\n\nasync def traced_13_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n cmds = []\n for _, cmd, cmd_args, _ in instance._pipeline:\n parts = [cmd]\n parts.extend(cmd_args)\n cmds.append(format_command_args(parts))\n resource = \"\\n\".join(cmds)\n with pin.tracer.trace(\n redisx.CMD,\n resource=resource,\n service=trace_utils.ext_service(pin, config.aioredis),\n span_type=SpanTypes.REDIS,\n ) as span:\n\n span.set_tags(\n {\n net.TARGET_HOST: instance._pool_or_conn.address[0],\n net.TARGET_PORT: instance._pool_or_conn.address[1],\n redisx.DB: instance._pool_or_conn.db or 0,\n }\n )\n\n span.set_tag(SPAN_MEASURED_KEY)\n span.set_tag(redisx.RAWCMD, resource)\n span.set_metric(redisx.PIPELINE_LEN, len(instance._pipeline))\n # set analytics sample rate if enabled\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())\n\n return await func(*args, **kwargs)\n", "path": "ddtrace/contrib/aioredis/patch.py"}]} | 2,405 | 181 |
gh_patches_debug_23684 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-4964 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Addons: `running` is invoked twice.
##### Steps to reproduce the problem:
1. Write an addon that implements `running()`
2. Running is invoked twice on startup.
--- END ISSUE ---
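For context, a minimal addon along these lines reproduces the report. The file name and log wording are illustrative only and not taken from the issue:

```python
# double_running.py -- hypothetical repro addon; load with `mitmproxy -s double_running.py`
from mitmproxy import ctx


class DoubleRunning:
    def __init__(self):
        self.calls = 0

    def running(self):
        # Per the report, this hook fires twice during startup.
        self.calls += 1
        ctx.log.info("running() call #%d" % self.calls)


addons = [DoubleRunning()]
```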
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/addons/script.py`
Content:
```
1 import asyncio
2 import os
3 import importlib.util
4 import importlib.machinery
5 import sys
6 import types
7 import typing
8 import traceback
9
10 from mitmproxy import addonmanager, hooks
11 from mitmproxy import exceptions
12 from mitmproxy import flow
13 from mitmproxy import command
14 from mitmproxy import eventsequence
15 from mitmproxy import ctx
16 import mitmproxy.types as mtypes
17
18
19 def load_script(path: str) -> typing.Optional[types.ModuleType]:
20 fullname = "__mitmproxy_script__.{}".format(
21 os.path.splitext(os.path.basename(path))[0]
22 )
23 # the fullname is not unique among scripts, so if there already is an existing script with said
24 # fullname, remove it.
25 sys.modules.pop(fullname, None)
26 oldpath = sys.path
27 sys.path.insert(0, os.path.dirname(path))
28 m = None
29 try:
30 loader = importlib.machinery.SourceFileLoader(fullname, path)
31 spec = importlib.util.spec_from_loader(fullname, loader=loader)
32 assert spec
33 m = importlib.util.module_from_spec(spec)
34 loader.exec_module(m)
35 if not getattr(m, "name", None):
36 m.name = path # type: ignore
37 except Exception as e:
38 script_error_handler(path, e, msg=str(e))
39 finally:
40 sys.path[:] = oldpath
41 return m
42
43
44 def script_error_handler(path, exc, msg="", tb=False):
45 """
46 Handles all the user's script errors with
47 an optional traceback
48 """
49 exception = type(exc).__name__
50 if msg:
51 exception = msg
52 lineno = ""
53 if hasattr(exc, "lineno"):
54 lineno = str(exc.lineno)
55 log_msg = f"in script {path}:{lineno} {exception}"
56 if tb:
57 etype, value, tback = sys.exc_info()
58 tback = addonmanager.cut_traceback(tback, "invoke_addon")
59 log_msg = log_msg + "\n" + "".join(traceback.format_exception(etype, value, tback))
60 ctx.log.error(log_msg)
61
62
63 ReloadInterval = 1
64
65
66 class Script:
67 """
68 An addon that manages a single script.
69 """
70
71 def __init__(self, path: str, reload: bool) -> None:
72 self.name = "scriptmanager:" + path
73 self.path = path
74 self.fullpath = os.path.expanduser(
75 path.strip("'\" ")
76 )
77 self.ns = None
78
79 if not os.path.isfile(self.fullpath):
80 raise exceptions.OptionsError('No such script')
81
82 self.reloadtask = None
83 if reload:
84 self.reloadtask = asyncio.ensure_future(self.watcher())
85 else:
86 self.loadscript()
87
88 def done(self):
89 if self.reloadtask:
90 self.reloadtask.cancel()
91
92 @property
93 def addons(self):
94 return [self.ns] if self.ns else []
95
96 def loadscript(self):
97 ctx.log.info("Loading script %s" % self.path)
98 if self.ns:
99 ctx.master.addons.remove(self.ns)
100 self.ns = None
101 with addonmanager.safecall():
102 ns = load_script(self.fullpath)
103 ctx.master.addons.register(ns)
104 self.ns = ns
105 if self.ns:
106 # We're already running, so we have to explicitly register and
107 # configure the addon
108 ctx.master.addons.invoke_addon(self.ns, hooks.RunningHook())
109 try:
110 ctx.master.addons.invoke_addon(
111 self.ns,
112 hooks.ConfigureHook(ctx.options.keys())
113 )
114 except exceptions.OptionsError as e:
115 script_error_handler(self.fullpath, e, msg=str(e))
116
117 async def watcher(self):
118 last_mtime = 0
119 while True:
120 try:
121 mtime = os.stat(self.fullpath).st_mtime
122 except FileNotFoundError:
123 ctx.log.info("Removing script %s" % self.path)
124 scripts = list(ctx.options.scripts)
125 scripts.remove(self.path)
126 ctx.options.update(scripts=scripts)
127 return
128 if mtime > last_mtime:
129 self.loadscript()
130 last_mtime = mtime
131 await asyncio.sleep(ReloadInterval)
132
133
134 class ScriptLoader:
135 """
136 An addon that manages loading scripts from options.
137 """
138 def __init__(self):
139 self.is_running = False
140 self.addons = []
141
142 def load(self, loader):
143 loader.add_option(
144 "scripts", typing.Sequence[str], [],
145 "Execute a script."
146 )
147
148 def running(self):
149 self.is_running = True
150
151 @command.command("script.run")
152 def script_run(self, flows: typing.Sequence[flow.Flow], path: mtypes.Path) -> None:
153 """
154 Run a script on the specified flows. The script is configured with
155 the current options and all lifecycle events for each flow are
156 simulated. Note that the load event is not invoked.
157 """
158 if not os.path.isfile(path):
159 ctx.log.error('No such script: %s' % path)
160 return
161 mod = load_script(path)
162 if mod:
163 with addonmanager.safecall():
164 ctx.master.addons.invoke_addon(mod, hooks.RunningHook())
165 ctx.master.addons.invoke_addon(
166 mod,
167 hooks.ConfigureHook(ctx.options.keys()),
168 )
169 for f in flows:
170 for evt in eventsequence.iterate(f):
171 ctx.master.addons.invoke_addon(mod, evt)
172
173 def configure(self, updated):
174 if "scripts" in updated:
175 for s in ctx.options.scripts:
176 if ctx.options.scripts.count(s) > 1:
177 raise exceptions.OptionsError("Duplicate script")
178
179 for a in self.addons[:]:
180 if a.path not in ctx.options.scripts:
181 ctx.log.info("Un-loading script: %s" % a.path)
182 ctx.master.addons.remove(a)
183 self.addons.remove(a)
184
185 # The machinations below are to ensure that:
186 # - Scripts remain in the same order
187 # - Scripts are not initialized un-necessarily. If only a
188 # script's order in the script list has changed, it is just
189 # moved.
190
191 current = {}
192 for a in self.addons:
193 current[a.path] = a
194
195 ordered = []
196 newscripts = []
197 for s in ctx.options.scripts:
198 if s in current:
199 ordered.append(current[s])
200 else:
201 sc = Script(s, True)
202 ordered.append(sc)
203 newscripts.append(sc)
204
205 self.addons = ordered
206
207 for s in newscripts:
208 ctx.master.addons.register(s)
209 if self.is_running:
210 # If we're already running, we configure and tell the addon
211 # we're up and running.
212 ctx.master.addons.invoke_addon(s, hooks.RunningHook())
213
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mitmproxy/addons/script.py b/mitmproxy/addons/script.py
--- a/mitmproxy/addons/script.py
+++ b/mitmproxy/addons/script.py
@@ -75,6 +75,7 @@
path.strip("'\" ")
)
self.ns = None
+ self.is_running = False
if not os.path.isfile(self.fullpath):
raise exceptions.OptionsError('No such script')
@@ -85,6 +86,9 @@
else:
self.loadscript()
+ def running(self):
+ self.is_running = True
+
def done(self):
if self.reloadtask:
self.reloadtask.cancel()
@@ -105,7 +109,8 @@
if self.ns:
# We're already running, so we have to explicitly register and
# configure the addon
- ctx.master.addons.invoke_addon(self.ns, hooks.RunningHook())
+ if self.is_running:
+ ctx.master.addons.invoke_addon(self.ns, hooks.RunningHook())
try:
ctx.master.addons.invoke_addon(
self.ns,
| {"golden_diff": "diff --git a/mitmproxy/addons/script.py b/mitmproxy/addons/script.py\n--- a/mitmproxy/addons/script.py\n+++ b/mitmproxy/addons/script.py\n@@ -75,6 +75,7 @@\n path.strip(\"'\\\" \")\n )\n self.ns = None\n+ self.is_running = False\n \n if not os.path.isfile(self.fullpath):\n raise exceptions.OptionsError('No such script')\n@@ -85,6 +86,9 @@\n else:\n self.loadscript()\n \n+ def running(self):\n+ self.is_running = True\n+\n def done(self):\n if self.reloadtask:\n self.reloadtask.cancel()\n@@ -105,7 +109,8 @@\n if self.ns:\n # We're already running, so we have to explicitly register and\n # configure the addon\n- ctx.master.addons.invoke_addon(self.ns, hooks.RunningHook())\n+ if self.is_running:\n+ ctx.master.addons.invoke_addon(self.ns, hooks.RunningHook())\n try:\n ctx.master.addons.invoke_addon(\n self.ns,\n", "issue": "Addons: `running` is invoked twice.\n##### Steps to reproduce the problem:\r\n\r\n1. Write an addon that implements `running()`\r\n2. Running is invoked twice on startup.\r\n\nAddons: `running` is invoked twice.\n##### Steps to reproduce the problem:\r\n\r\n1. Write an addon that implements `running()`\r\n2. Running is invoked twice on startup.\r\n\n", "before_files": [{"content": "import asyncio\nimport os\nimport importlib.util\nimport importlib.machinery\nimport sys\nimport types\nimport typing\nimport traceback\n\nfrom mitmproxy import addonmanager, hooks\nfrom mitmproxy import exceptions\nfrom mitmproxy import flow\nfrom mitmproxy import command\nfrom mitmproxy import eventsequence\nfrom mitmproxy import ctx\nimport mitmproxy.types as mtypes\n\n\ndef load_script(path: str) -> typing.Optional[types.ModuleType]:\n fullname = \"__mitmproxy_script__.{}\".format(\n os.path.splitext(os.path.basename(path))[0]\n )\n # the fullname is not unique among scripts, so if there already is an existing script with said\n # fullname, remove it.\n sys.modules.pop(fullname, None)\n oldpath = sys.path\n sys.path.insert(0, os.path.dirname(path))\n m = None\n try:\n loader = importlib.machinery.SourceFileLoader(fullname, path)\n spec = importlib.util.spec_from_loader(fullname, loader=loader)\n assert spec\n m = importlib.util.module_from_spec(spec)\n loader.exec_module(m)\n if not getattr(m, \"name\", None):\n m.name = path # type: ignore\n except Exception as e:\n script_error_handler(path, e, msg=str(e))\n finally:\n sys.path[:] = oldpath\n return m\n\n\ndef script_error_handler(path, exc, msg=\"\", tb=False):\n \"\"\"\n Handles all the user's script errors with\n an optional traceback\n \"\"\"\n exception = type(exc).__name__\n if msg:\n exception = msg\n lineno = \"\"\n if hasattr(exc, \"lineno\"):\n lineno = str(exc.lineno)\n log_msg = f\"in script {path}:{lineno} {exception}\"\n if tb:\n etype, value, tback = sys.exc_info()\n tback = addonmanager.cut_traceback(tback, \"invoke_addon\")\n log_msg = log_msg + \"\\n\" + \"\".join(traceback.format_exception(etype, value, tback))\n ctx.log.error(log_msg)\n\n\nReloadInterval = 1\n\n\nclass Script:\n \"\"\"\n An addon that manages a single script.\n \"\"\"\n\n def __init__(self, path: str, reload: bool) -> None:\n self.name = \"scriptmanager:\" + path\n self.path = path\n self.fullpath = os.path.expanduser(\n path.strip(\"'\\\" \")\n )\n self.ns = None\n\n if not os.path.isfile(self.fullpath):\n raise exceptions.OptionsError('No such script')\n\n self.reloadtask = None\n if reload:\n self.reloadtask = asyncio.ensure_future(self.watcher())\n else:\n self.loadscript()\n\n def done(self):\n if self.reloadtask:\n 
self.reloadtask.cancel()\n\n @property\n def addons(self):\n return [self.ns] if self.ns else []\n\n def loadscript(self):\n ctx.log.info(\"Loading script %s\" % self.path)\n if self.ns:\n ctx.master.addons.remove(self.ns)\n self.ns = None\n with addonmanager.safecall():\n ns = load_script(self.fullpath)\n ctx.master.addons.register(ns)\n self.ns = ns\n if self.ns:\n # We're already running, so we have to explicitly register and\n # configure the addon\n ctx.master.addons.invoke_addon(self.ns, hooks.RunningHook())\n try:\n ctx.master.addons.invoke_addon(\n self.ns,\n hooks.ConfigureHook(ctx.options.keys())\n )\n except exceptions.OptionsError as e:\n script_error_handler(self.fullpath, e, msg=str(e))\n\n async def watcher(self):\n last_mtime = 0\n while True:\n try:\n mtime = os.stat(self.fullpath).st_mtime\n except FileNotFoundError:\n ctx.log.info(\"Removing script %s\" % self.path)\n scripts = list(ctx.options.scripts)\n scripts.remove(self.path)\n ctx.options.update(scripts=scripts)\n return\n if mtime > last_mtime:\n self.loadscript()\n last_mtime = mtime\n await asyncio.sleep(ReloadInterval)\n\n\nclass ScriptLoader:\n \"\"\"\n An addon that manages loading scripts from options.\n \"\"\"\n def __init__(self):\n self.is_running = False\n self.addons = []\n\n def load(self, loader):\n loader.add_option(\n \"scripts\", typing.Sequence[str], [],\n \"Execute a script.\"\n )\n\n def running(self):\n self.is_running = True\n\n @command.command(\"script.run\")\n def script_run(self, flows: typing.Sequence[flow.Flow], path: mtypes.Path) -> None:\n \"\"\"\n Run a script on the specified flows. The script is configured with\n the current options and all lifecycle events for each flow are\n simulated. Note that the load event is not invoked.\n \"\"\"\n if not os.path.isfile(path):\n ctx.log.error('No such script: %s' % path)\n return\n mod = load_script(path)\n if mod:\n with addonmanager.safecall():\n ctx.master.addons.invoke_addon(mod, hooks.RunningHook())\n ctx.master.addons.invoke_addon(\n mod,\n hooks.ConfigureHook(ctx.options.keys()),\n )\n for f in flows:\n for evt in eventsequence.iterate(f):\n ctx.master.addons.invoke_addon(mod, evt)\n\n def configure(self, updated):\n if \"scripts\" in updated:\n for s in ctx.options.scripts:\n if ctx.options.scripts.count(s) > 1:\n raise exceptions.OptionsError(\"Duplicate script\")\n\n for a in self.addons[:]:\n if a.path not in ctx.options.scripts:\n ctx.log.info(\"Un-loading script: %s\" % a.path)\n ctx.master.addons.remove(a)\n self.addons.remove(a)\n\n # The machinations below are to ensure that:\n # - Scripts remain in the same order\n # - Scripts are not initialized un-necessarily. 
If only a\n # script's order in the script list has changed, it is just\n # moved.\n\n current = {}\n for a in self.addons:\n current[a.path] = a\n\n ordered = []\n newscripts = []\n for s in ctx.options.scripts:\n if s in current:\n ordered.append(current[s])\n else:\n sc = Script(s, True)\n ordered.append(sc)\n newscripts.append(sc)\n\n self.addons = ordered\n\n for s in newscripts:\n ctx.master.addons.register(s)\n if self.is_running:\n # If we're already running, we configure and tell the addon\n # we're up and running.\n ctx.master.addons.invoke_addon(s, hooks.RunningHook())\n", "path": "mitmproxy/addons/script.py"}], "after_files": [{"content": "import asyncio\nimport os\nimport importlib.util\nimport importlib.machinery\nimport sys\nimport types\nimport typing\nimport traceback\n\nfrom mitmproxy import addonmanager, hooks\nfrom mitmproxy import exceptions\nfrom mitmproxy import flow\nfrom mitmproxy import command\nfrom mitmproxy import eventsequence\nfrom mitmproxy import ctx\nimport mitmproxy.types as mtypes\n\n\ndef load_script(path: str) -> typing.Optional[types.ModuleType]:\n fullname = \"__mitmproxy_script__.{}\".format(\n os.path.splitext(os.path.basename(path))[0]\n )\n # the fullname is not unique among scripts, so if there already is an existing script with said\n # fullname, remove it.\n sys.modules.pop(fullname, None)\n oldpath = sys.path\n sys.path.insert(0, os.path.dirname(path))\n m = None\n try:\n loader = importlib.machinery.SourceFileLoader(fullname, path)\n spec = importlib.util.spec_from_loader(fullname, loader=loader)\n assert spec\n m = importlib.util.module_from_spec(spec)\n loader.exec_module(m)\n if not getattr(m, \"name\", None):\n m.name = path # type: ignore\n except Exception as e:\n script_error_handler(path, e, msg=str(e))\n finally:\n sys.path[:] = oldpath\n return m\n\n\ndef script_error_handler(path, exc, msg=\"\", tb=False):\n \"\"\"\n Handles all the user's script errors with\n an optional traceback\n \"\"\"\n exception = type(exc).__name__\n if msg:\n exception = msg\n lineno = \"\"\n if hasattr(exc, \"lineno\"):\n lineno = str(exc.lineno)\n log_msg = f\"in script {path}:{lineno} {exception}\"\n if tb:\n etype, value, tback = sys.exc_info()\n tback = addonmanager.cut_traceback(tback, \"invoke_addon\")\n log_msg = log_msg + \"\\n\" + \"\".join(traceback.format_exception(etype, value, tback))\n ctx.log.error(log_msg)\n\n\nReloadInterval = 1\n\n\nclass Script:\n \"\"\"\n An addon that manages a single script.\n \"\"\"\n\n def __init__(self, path: str, reload: bool) -> None:\n self.name = \"scriptmanager:\" + path\n self.path = path\n self.fullpath = os.path.expanduser(\n path.strip(\"'\\\" \")\n )\n self.ns = None\n self.is_running = False\n\n if not os.path.isfile(self.fullpath):\n raise exceptions.OptionsError('No such script')\n\n self.reloadtask = None\n if reload:\n self.reloadtask = asyncio.ensure_future(self.watcher())\n else:\n self.loadscript()\n\n def running(self):\n self.is_running = True\n\n def done(self):\n if self.reloadtask:\n self.reloadtask.cancel()\n\n @property\n def addons(self):\n return [self.ns] if self.ns else []\n\n def loadscript(self):\n ctx.log.info(\"Loading script %s\" % self.path)\n if self.ns:\n ctx.master.addons.remove(self.ns)\n self.ns = None\n with addonmanager.safecall():\n ns = load_script(self.fullpath)\n ctx.master.addons.register(ns)\n self.ns = ns\n if self.ns:\n # We're already running, so we have to explicitly register and\n # configure the addon\n if self.is_running:\n 
ctx.master.addons.invoke_addon(self.ns, hooks.RunningHook())\n try:\n ctx.master.addons.invoke_addon(\n self.ns,\n hooks.ConfigureHook(ctx.options.keys())\n )\n except exceptions.OptionsError as e:\n script_error_handler(self.fullpath, e, msg=str(e))\n\n async def watcher(self):\n last_mtime = 0\n while True:\n try:\n mtime = os.stat(self.fullpath).st_mtime\n except FileNotFoundError:\n ctx.log.info(\"Removing script %s\" % self.path)\n scripts = list(ctx.options.scripts)\n scripts.remove(self.path)\n ctx.options.update(scripts=scripts)\n return\n if mtime > last_mtime:\n self.loadscript()\n last_mtime = mtime\n await asyncio.sleep(ReloadInterval)\n\n\nclass ScriptLoader:\n \"\"\"\n An addon that manages loading scripts from options.\n \"\"\"\n def __init__(self):\n self.is_running = False\n self.addons = []\n\n def load(self, loader):\n loader.add_option(\n \"scripts\", typing.Sequence[str], [],\n \"Execute a script.\"\n )\n\n def running(self):\n self.is_running = True\n\n @command.command(\"script.run\")\n def script_run(self, flows: typing.Sequence[flow.Flow], path: mtypes.Path) -> None:\n \"\"\"\n Run a script on the specified flows. The script is configured with\n the current options and all lifecycle events for each flow are\n simulated. Note that the load event is not invoked.\n \"\"\"\n if not os.path.isfile(path):\n ctx.log.error('No such script: %s' % path)\n return\n mod = load_script(path)\n if mod:\n with addonmanager.safecall():\n ctx.master.addons.invoke_addon(mod, hooks.RunningHook())\n ctx.master.addons.invoke_addon(\n mod,\n hooks.ConfigureHook(ctx.options.keys()),\n )\n for f in flows:\n for evt in eventsequence.iterate(f):\n ctx.master.addons.invoke_addon(mod, evt)\n\n def configure(self, updated):\n if \"scripts\" in updated:\n for s in ctx.options.scripts:\n if ctx.options.scripts.count(s) > 1:\n raise exceptions.OptionsError(\"Duplicate script\")\n\n for a in self.addons[:]:\n if a.path not in ctx.options.scripts:\n ctx.log.info(\"Un-loading script: %s\" % a.path)\n ctx.master.addons.remove(a)\n self.addons.remove(a)\n\n # The machinations below are to ensure that:\n # - Scripts remain in the same order\n # - Scripts are not initialized un-necessarily. If only a\n # script's order in the script list has changed, it is just\n # moved.\n\n current = {}\n for a in self.addons:\n current[a.path] = a\n\n ordered = []\n newscripts = []\n for s in ctx.options.scripts:\n if s in current:\n ordered.append(current[s])\n else:\n sc = Script(s, True)\n ordered.append(sc)\n newscripts.append(sc)\n\n self.addons = ordered\n\n for s in newscripts:\n ctx.master.addons.register(s)\n if self.is_running:\n # If we're already running, we configure and tell the addon\n # we're up and running.\n ctx.master.addons.invoke_addon(s, hooks.RunningHook())\n", "path": "mitmproxy/addons/script.py"}]} | 2,339 | 244 |
gh_patches_debug_26174 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-776 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove botan from our library
according to [this](https://github.com/botanio/sdk#py) botan has its own implementation for Python. No need to reinvent the wheel. I suggest we remove it from ptb in the next major (8.0) version.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `telegram/contrib/__init__.py`
Content:
```
1 from .botan import Botan
2
3 __all__ = ['Botan']
4
```
Path: `telegram/contrib/botan.py`
Content:
```
1 import logging
2
3 from future.moves.urllib.parse import quote
4 from future.moves.urllib.error import HTTPError, URLError
5 from future.moves.urllib.request import urlopen, Request
6
7 logging.getLogger(__name__).addHandler(logging.NullHandler())
8
9
10 class Botan(object):
11 """This class helps to send incoming events to your botan analytics account.
12 See more: https://github.com/botanio/sdk#botan-sdk
13 """
14
15 token = ''
16 url_template = 'https://api.botan.io/track?token={token}' \
17 '&uid={uid}&name={name}&src=python-telegram-bot'
18
19 def __init__(self, token):
20 self.token = token
21 self.logger = logging.getLogger(__name__)
22
23 def track(self, message, event_name='event'):
24 try:
25 uid = message.chat_id
26 except AttributeError:
27 self.logger.warn('No chat_id in message')
28 return False
29 data = message.to_json()
30 try:
31 url = self.url_template.format(
32 token=str(self.token), uid=str(uid), name=quote(event_name))
33 request = Request(
34 url, data=data.encode(), headers={'Content-Type': 'application/json'})
35 urlopen(request)
36 return True
37 except HTTPError as error:
38 self.logger.warn('Botan track error ' + str(error.code) + ':' + error.read().decode(
39 'utf-8'))
40 return False
41 except URLError as error:
42 self.logger.warn('Botan track error ' + str(error.reason))
43 return False
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/telegram/contrib/__init__.py b/telegram/contrib/__init__.py
deleted file mode 100644
--- a/telegram/contrib/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from .botan import Botan
-
-__all__ = ['Botan']
diff --git a/telegram/contrib/botan.py b/telegram/contrib/botan.py
deleted file mode 100644
--- a/telegram/contrib/botan.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import logging
-
-from future.moves.urllib.parse import quote
-from future.moves.urllib.error import HTTPError, URLError
-from future.moves.urllib.request import urlopen, Request
-
-logging.getLogger(__name__).addHandler(logging.NullHandler())
-
-
-class Botan(object):
- """This class helps to send incoming events to your botan analytics account.
- See more: https://github.com/botanio/sdk#botan-sdk
- """
-
- token = ''
- url_template = 'https://api.botan.io/track?token={token}' \
- '&uid={uid}&name={name}&src=python-telegram-bot'
-
- def __init__(self, token):
- self.token = token
- self.logger = logging.getLogger(__name__)
-
- def track(self, message, event_name='event'):
- try:
- uid = message.chat_id
- except AttributeError:
- self.logger.warn('No chat_id in message')
- return False
- data = message.to_json()
- try:
- url = self.url_template.format(
- token=str(self.token), uid=str(uid), name=quote(event_name))
- request = Request(
- url, data=data.encode(), headers={'Content-Type': 'application/json'})
- urlopen(request)
- return True
- except HTTPError as error:
- self.logger.warn('Botan track error ' + str(error.code) + ':' + error.read().decode(
- 'utf-8'))
- return False
- except URLError as error:
- self.logger.warn('Botan track error ' + str(error.reason))
- return False
| {"golden_diff": "diff --git a/telegram/contrib/__init__.py b/telegram/contrib/__init__.py\ndeleted file mode 100644\n--- a/telegram/contrib/__init__.py\n+++ /dev/null\n@@ -1,3 +0,0 @@\n-from .botan import Botan\n-\n-__all__ = ['Botan']\ndiff --git a/telegram/contrib/botan.py b/telegram/contrib/botan.py\ndeleted file mode 100644\n--- a/telegram/contrib/botan.py\n+++ /dev/null\n@@ -1,43 +0,0 @@\n-import logging\n-\n-from future.moves.urllib.parse import quote\n-from future.moves.urllib.error import HTTPError, URLError\n-from future.moves.urllib.request import urlopen, Request\n-\n-logging.getLogger(__name__).addHandler(logging.NullHandler())\n-\n-\n-class Botan(object):\n- \"\"\"This class helps to send incoming events to your botan analytics account.\n- See more: https://github.com/botanio/sdk#botan-sdk\n- \"\"\"\n-\n- token = ''\n- url_template = 'https://api.botan.io/track?token={token}' \\\n- '&uid={uid}&name={name}&src=python-telegram-bot'\n-\n- def __init__(self, token):\n- self.token = token\n- self.logger = logging.getLogger(__name__)\n-\n- def track(self, message, event_name='event'):\n- try:\n- uid = message.chat_id\n- except AttributeError:\n- self.logger.warn('No chat_id in message')\n- return False\n- data = message.to_json()\n- try:\n- url = self.url_template.format(\n- token=str(self.token), uid=str(uid), name=quote(event_name))\n- request = Request(\n- url, data=data.encode(), headers={'Content-Type': 'application/json'})\n- urlopen(request)\n- return True\n- except HTTPError as error:\n- self.logger.warn('Botan track error ' + str(error.code) + ':' + error.read().decode(\n- 'utf-8'))\n- return False\n- except URLError as error:\n- self.logger.warn('Botan track error ' + str(error.reason))\n- return False\n", "issue": "Remove botan from our library\naccording to [this](https://github.com/botanio/sdk#py) botan has it's own implementation for python. No need to reinvent the wheel. 
I suggest we remove it from ptb in the next major (8.0) version.\n", "before_files": [{"content": "from .botan import Botan\n\n__all__ = ['Botan']\n", "path": "telegram/contrib/__init__.py"}, {"content": "import logging\n\nfrom future.moves.urllib.parse import quote\nfrom future.moves.urllib.error import HTTPError, URLError\nfrom future.moves.urllib.request import urlopen, Request\n\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n\n\nclass Botan(object):\n \"\"\"This class helps to send incoming events to your botan analytics account.\n See more: https://github.com/botanio/sdk#botan-sdk\n \"\"\"\n\n token = ''\n url_template = 'https://api.botan.io/track?token={token}' \\\n '&uid={uid}&name={name}&src=python-telegram-bot'\n\n def __init__(self, token):\n self.token = token\n self.logger = logging.getLogger(__name__)\n\n def track(self, message, event_name='event'):\n try:\n uid = message.chat_id\n except AttributeError:\n self.logger.warn('No chat_id in message')\n return False\n data = message.to_json()\n try:\n url = self.url_template.format(\n token=str(self.token), uid=str(uid), name=quote(event_name))\n request = Request(\n url, data=data.encode(), headers={'Content-Type': 'application/json'})\n urlopen(request)\n return True\n except HTTPError as error:\n self.logger.warn('Botan track error ' + str(error.code) + ':' + error.read().decode(\n 'utf-8'))\n return False\n except URLError as error:\n self.logger.warn('Botan track error ' + str(error.reason))\n return False\n", "path": "telegram/contrib/botan.py"}], "after_files": [{"content": null, "path": "telegram/contrib/__init__.py"}, {"content": null, "path": "telegram/contrib/botan.py"}]} | 769 | 498 |
gh_patches_debug_5151 | rasdani/github-patches | git_diff | pyca__cryptography-4289 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Raise clearer error message when passing a unicode value as a symmetric key
Python: 2.7.3
cryptography==2.1.4
cffi==1.11.5
pip 9.0.1
```
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
import os
iv = os.urandom(16)
cipher = Cipher(algorithms.AES(key), modes.CFB8(iv),
backend=default_backend())
decryptor = cipher.decryptor()
raw_text = decryptor.update(encoded_text) + decryptor.finalize()
```
If `key` is unicode, it will raise an error.
```
/env/local/lib/python2.7/site-packages/cryptography/hazmat/primitives/ciphers/base.pyc in decryptor(self)
125 def decryptor(self):
126 ctx = self._backend.create_symmetric_decryption_ctx(
--> 127 self.algorithm, self.mode
128 )
129 return self._wrap_ctx(ctx, encrypt=False)
/env/local/lib/python2.7/site-packages/cryptography/hazmat/backends/openssl/backend.pyc in create_symmetric_decryption_ctx(self, cipher, mode)
270
271 def create_symmetric_decryption_ctx(self, cipher, mode):
--> 272 return _CipherContext(self, cipher, mode, _CipherContext._DECRYPT)
273
274 def pbkdf2_hmac_supported(self, algorithm):
/env/local/lib/python2.7/site-packages/cryptography/hazmat/backends/openssl/ciphers.pyc in __init__(self, backend, cipher, mode, operation)
108 cipher.key,
109 iv_nonce,
--> 110 operation
111 )
112 self._backend.openssl_assert(res != 0)
```
I have this temporary fix:
```key = key.encode('utf-8') if isinstance(key, unicode) else key```
But should we do this in `backends/openssl/ciphers.py`?
--- END ISSUE ---
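One way to make the failure clearer is to validate the key type before it reaches the backend. The helper below is only a sketch of that idea; the name and message are assumptions, not the library's actual API:

```python
# Sketch only: reject text (unicode) keys early with an explicit TypeError.
def _check_key_is_bytes(key):
    if not isinstance(key, bytes):
        raise TypeError(
            "Symmetric key must be bytes, got %s; encode text keys explicitly, "
            "e.g. key.encode('utf-8')." % type(key).__name__
        )
    return key
```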
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/hazmat/primitives/ciphers/algorithms.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 from cryptography import utils
8 from cryptography.hazmat.primitives.ciphers import (
9 BlockCipherAlgorithm, CipherAlgorithm
10 )
11 from cryptography.hazmat.primitives.ciphers.modes import ModeWithNonce
12
13
14 def _verify_key_size(algorithm, key):
15 # Verify that the key size matches the expected key size
16 if len(key) * 8 not in algorithm.key_sizes:
17 raise ValueError("Invalid key size ({0}) for {1}.".format(
18 len(key) * 8, algorithm.name
19 ))
20 return key
21
22
23 @utils.register_interface(BlockCipherAlgorithm)
24 @utils.register_interface(CipherAlgorithm)
25 class AES(object):
26 name = "AES"
27 block_size = 128
28 # 512 added to support AES-256-XTS, which uses 512-bit keys
29 key_sizes = frozenset([128, 192, 256, 512])
30
31 def __init__(self, key):
32 self.key = _verify_key_size(self, key)
33
34 @property
35 def key_size(self):
36 return len(self.key) * 8
37
38
39 @utils.register_interface(BlockCipherAlgorithm)
40 @utils.register_interface(CipherAlgorithm)
41 class Camellia(object):
42 name = "camellia"
43 block_size = 128
44 key_sizes = frozenset([128, 192, 256])
45
46 def __init__(self, key):
47 self.key = _verify_key_size(self, key)
48
49 @property
50 def key_size(self):
51 return len(self.key) * 8
52
53
54 @utils.register_interface(BlockCipherAlgorithm)
55 @utils.register_interface(CipherAlgorithm)
56 class TripleDES(object):
57 name = "3DES"
58 block_size = 64
59 key_sizes = frozenset([64, 128, 192])
60
61 def __init__(self, key):
62 if len(key) == 8:
63 key += key + key
64 elif len(key) == 16:
65 key += key[:8]
66 self.key = _verify_key_size(self, key)
67
68 @property
69 def key_size(self):
70 return len(self.key) * 8
71
72
73 @utils.register_interface(BlockCipherAlgorithm)
74 @utils.register_interface(CipherAlgorithm)
75 class Blowfish(object):
76 name = "Blowfish"
77 block_size = 64
78 key_sizes = frozenset(range(32, 449, 8))
79
80 def __init__(self, key):
81 self.key = _verify_key_size(self, key)
82
83 @property
84 def key_size(self):
85 return len(self.key) * 8
86
87
88 @utils.register_interface(BlockCipherAlgorithm)
89 @utils.register_interface(CipherAlgorithm)
90 class CAST5(object):
91 name = "CAST5"
92 block_size = 64
93 key_sizes = frozenset(range(40, 129, 8))
94
95 def __init__(self, key):
96 self.key = _verify_key_size(self, key)
97
98 @property
99 def key_size(self):
100 return len(self.key) * 8
101
102
103 @utils.register_interface(CipherAlgorithm)
104 class ARC4(object):
105 name = "RC4"
106 key_sizes = frozenset([40, 56, 64, 80, 128, 160, 192, 256])
107
108 def __init__(self, key):
109 self.key = _verify_key_size(self, key)
110
111 @property
112 def key_size(self):
113 return len(self.key) * 8
114
115
116 @utils.register_interface(CipherAlgorithm)
117 class IDEA(object):
118 name = "IDEA"
119 block_size = 64
120 key_sizes = frozenset([128])
121
122 def __init__(self, key):
123 self.key = _verify_key_size(self, key)
124
125 @property
126 def key_size(self):
127 return len(self.key) * 8
128
129
130 @utils.register_interface(BlockCipherAlgorithm)
131 @utils.register_interface(CipherAlgorithm)
132 class SEED(object):
133 name = "SEED"
134 block_size = 128
135 key_sizes = frozenset([128])
136
137 def __init__(self, key):
138 self.key = _verify_key_size(self, key)
139
140 @property
141 def key_size(self):
142 return len(self.key) * 8
143
144
145 @utils.register_interface(CipherAlgorithm)
146 @utils.register_interface(ModeWithNonce)
147 class ChaCha20(object):
148 name = "ChaCha20"
149 key_sizes = frozenset([256])
150
151 def __init__(self, key, nonce):
152 self.key = _verify_key_size(self, key)
153 if not isinstance(nonce, bytes):
154 raise TypeError("nonce must be bytes")
155
156 if len(nonce) != 16:
157 raise ValueError("nonce must be 128-bits (16 bytes)")
158
159 self._nonce = nonce
160
161 nonce = utils.read_only_property("_nonce")
162
163 @property
164 def key_size(self):
165 return len(self.key) * 8
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cryptography/hazmat/primitives/ciphers/algorithms.py b/src/cryptography/hazmat/primitives/ciphers/algorithms.py
--- a/src/cryptography/hazmat/primitives/ciphers/algorithms.py
+++ b/src/cryptography/hazmat/primitives/ciphers/algorithms.py
@@ -12,6 +12,9 @@
def _verify_key_size(algorithm, key):
+ # Verify that the key is instance of bytes
+ utils._check_bytes("key", key)
+
# Verify that the key size matches the expected key size
if len(key) * 8 not in algorithm.key_sizes:
raise ValueError("Invalid key size ({0}) for {1}.".format(
| {"golden_diff": "diff --git a/src/cryptography/hazmat/primitives/ciphers/algorithms.py b/src/cryptography/hazmat/primitives/ciphers/algorithms.py\n--- a/src/cryptography/hazmat/primitives/ciphers/algorithms.py\n+++ b/src/cryptography/hazmat/primitives/ciphers/algorithms.py\n@@ -12,6 +12,9 @@\n \n \n def _verify_key_size(algorithm, key):\n+ # Verify that the key is instance of bytes\n+ utils._check_bytes(\"key\", key)\n+\n # Verify that the key size matches the expected key size\n if len(key) * 8 not in algorithm.key_sizes:\n raise ValueError(\"Invalid key size ({0}) for {1}.\".format(\n", "issue": "Raise clearer error message when passing a unicode value as a symmetric key\nPython: 2.7.3\r\ncryptography==2.1.4\r\ncffi==1.11.5\r\npip 9.0.1\r\n\r\n```\r\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\r\nfrom cryptography.hazmat.backends import default_backend\r\nimport os\r\n\r\niv = os.urandom(16)\r\ncipher = Cipher(algorithms.AES(key), modes.CFB8(iv),\r\n backend=default_backend())\r\ndecryptor = cipher.decryptor()\r\nraw_text = decryptor.update(encoded_text) + decryptor.finalize()\r\n```\r\n\r\nIf `key` is unicode it will raise error.\r\n```\r\n\r\n/env/local/lib/python2.7/site-packages/cryptography/hazmat/primitives/ciphers/base.pyc in decryptor(self)\r\n 125 def decryptor(self):\r\n 126 ctx = self._backend.create_symmetric_decryption_ctx(\r\n--> 127 self.algorithm, self.mode\r\n 128 )\r\n 129 return self._wrap_ctx(ctx, encrypt=False)\r\n\r\n/env/local/lib/python2.7/site-packages/cryptography/hazmat/backends/openssl/backend.pyc in create_symmetric_decryption_ctx(self, cipher, mode)\r\n 270 \r\n 271 def create_symmetric_decryption_ctx(self, cipher, mode):\r\n--> 272 return _CipherContext(self, cipher, mode, _CipherContext._DECRYPT)\r\n 273 \r\n 274 def pbkdf2_hmac_supported(self, algorithm):\r\n\r\n/env/local/lib/python2.7/site-packages/cryptography/hazmat/backends/openssl/ciphers.pyc in __init__(self, backend, cipher, mode, operation)\r\n 108 cipher.key,\r\n 109 iv_nonce,\r\n--> 110 operation\r\n 111 )\r\n 112 self._backend.openssl_assert(res != 0)\r\n``` \r\n\r\nI have this fix temporary\r\n```key = key.encode('utf-8') if isinstance(key, unicode) else key```\r\nBut should we do thi in `backends/openssl/ciphers.py` ?\r\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom cryptography import utils\nfrom cryptography.hazmat.primitives.ciphers import (\n BlockCipherAlgorithm, CipherAlgorithm\n)\nfrom cryptography.hazmat.primitives.ciphers.modes import ModeWithNonce\n\n\ndef _verify_key_size(algorithm, key):\n # Verify that the key size matches the expected key size\n if len(key) * 8 not in algorithm.key_sizes:\n raise ValueError(\"Invalid key size ({0}) for {1}.\".format(\n len(key) * 8, algorithm.name\n ))\n return key\n\n\[email protected]_interface(BlockCipherAlgorithm)\[email protected]_interface(CipherAlgorithm)\nclass AES(object):\n name = \"AES\"\n block_size = 128\n # 512 added to support AES-256-XTS, which uses 512-bit keys\n key_sizes = frozenset([128, 192, 256, 512])\n\n def __init__(self, key):\n self.key = _verify_key_size(self, key)\n\n @property\n def key_size(self):\n return len(self.key) * 8\n\n\[email protected]_interface(BlockCipherAlgorithm)\[email protected]_interface(CipherAlgorithm)\nclass Camellia(object):\n name = \"camellia\"\n block_size = 128\n key_sizes = frozenset([128, 192, 256])\n\n def __init__(self, key):\n self.key = _verify_key_size(self, key)\n\n @property\n def key_size(self):\n return len(self.key) * 8\n\n\[email protected]_interface(BlockCipherAlgorithm)\[email protected]_interface(CipherAlgorithm)\nclass TripleDES(object):\n name = \"3DES\"\n block_size = 64\n key_sizes = frozenset([64, 128, 192])\n\n def __init__(self, key):\n if len(key) == 8:\n key += key + key\n elif len(key) == 16:\n key += key[:8]\n self.key = _verify_key_size(self, key)\n\n @property\n def key_size(self):\n return len(self.key) * 8\n\n\[email protected]_interface(BlockCipherAlgorithm)\[email protected]_interface(CipherAlgorithm)\nclass Blowfish(object):\n name = \"Blowfish\"\n block_size = 64\n key_sizes = frozenset(range(32, 449, 8))\n\n def __init__(self, key):\n self.key = _verify_key_size(self, key)\n\n @property\n def key_size(self):\n return len(self.key) * 8\n\n\[email protected]_interface(BlockCipherAlgorithm)\[email protected]_interface(CipherAlgorithm)\nclass CAST5(object):\n name = \"CAST5\"\n block_size = 64\n key_sizes = frozenset(range(40, 129, 8))\n\n def __init__(self, key):\n self.key = _verify_key_size(self, key)\n\n @property\n def key_size(self):\n return len(self.key) * 8\n\n\[email protected]_interface(CipherAlgorithm)\nclass ARC4(object):\n name = \"RC4\"\n key_sizes = frozenset([40, 56, 64, 80, 128, 160, 192, 256])\n\n def __init__(self, key):\n self.key = _verify_key_size(self, key)\n\n @property\n def key_size(self):\n return len(self.key) * 8\n\n\[email protected]_interface(CipherAlgorithm)\nclass IDEA(object):\n name = \"IDEA\"\n block_size = 64\n key_sizes = frozenset([128])\n\n def __init__(self, key):\n self.key = _verify_key_size(self, key)\n\n @property\n def key_size(self):\n return len(self.key) * 8\n\n\[email protected]_interface(BlockCipherAlgorithm)\[email protected]_interface(CipherAlgorithm)\nclass SEED(object):\n name = \"SEED\"\n block_size = 128\n key_sizes = frozenset([128])\n\n def __init__(self, key):\n self.key = _verify_key_size(self, key)\n\n @property\n def key_size(self):\n return len(self.key) * 8\n\n\[email protected]_interface(CipherAlgorithm)\[email protected]_interface(ModeWithNonce)\nclass ChaCha20(object):\n name = \"ChaCha20\"\n key_sizes = frozenset([256])\n\n def __init__(self, key, nonce):\n self.key = _verify_key_size(self, key)\n if 
not isinstance(nonce, bytes):\n raise TypeError(\"nonce must be bytes\")\n\n if len(nonce) != 16:\n raise ValueError(\"nonce must be 128-bits (16 bytes)\")\n\n self._nonce = nonce\n\n nonce = utils.read_only_property(\"_nonce\")\n\n @property\n def key_size(self):\n return len(self.key) * 8\n", "path": "src/cryptography/hazmat/primitives/ciphers/algorithms.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom cryptography import utils\nfrom cryptography.hazmat.primitives.ciphers import (\n BlockCipherAlgorithm, CipherAlgorithm\n)\nfrom cryptography.hazmat.primitives.ciphers.modes import ModeWithNonce\n\n\ndef _verify_key_size(algorithm, key):\n # Verify that the key is instance of bytes\n utils._check_bytes(\"key\", key)\n\n # Verify that the key size matches the expected key size\n if len(key) * 8 not in algorithm.key_sizes:\n raise ValueError(\"Invalid key size ({0}) for {1}.\".format(\n len(key) * 8, algorithm.name\n ))\n return key\n\n\[email protected]_interface(BlockCipherAlgorithm)\[email protected]_interface(CipherAlgorithm)\nclass AES(object):\n name = \"AES\"\n block_size = 128\n # 512 added to support AES-256-XTS, which uses 512-bit keys\n key_sizes = frozenset([128, 192, 256, 512])\n\n def __init__(self, key):\n self.key = _verify_key_size(self, key)\n\n @property\n def key_size(self):\n return len(self.key) * 8\n\n\[email protected]_interface(BlockCipherAlgorithm)\[email protected]_interface(CipherAlgorithm)\nclass Camellia(object):\n name = \"camellia\"\n block_size = 128\n key_sizes = frozenset([128, 192, 256])\n\n def __init__(self, key):\n self.key = _verify_key_size(self, key)\n\n @property\n def key_size(self):\n return len(self.key) * 8\n\n\[email protected]_interface(BlockCipherAlgorithm)\[email protected]_interface(CipherAlgorithm)\nclass TripleDES(object):\n name = \"3DES\"\n block_size = 64\n key_sizes = frozenset([64, 128, 192])\n\n def __init__(self, key):\n if len(key) == 8:\n key += key + key\n elif len(key) == 16:\n key += key[:8]\n self.key = _verify_key_size(self, key)\n\n @property\n def key_size(self):\n return len(self.key) * 8\n\n\[email protected]_interface(BlockCipherAlgorithm)\[email protected]_interface(CipherAlgorithm)\nclass Blowfish(object):\n name = \"Blowfish\"\n block_size = 64\n key_sizes = frozenset(range(32, 449, 8))\n\n def __init__(self, key):\n self.key = _verify_key_size(self, key)\n\n @property\n def key_size(self):\n return len(self.key) * 8\n\n\[email protected]_interface(BlockCipherAlgorithm)\[email protected]_interface(CipherAlgorithm)\nclass CAST5(object):\n name = \"CAST5\"\n block_size = 64\n key_sizes = frozenset(range(40, 129, 8))\n\n def __init__(self, key):\n self.key = _verify_key_size(self, key)\n\n @property\n def key_size(self):\n return len(self.key) * 8\n\n\[email protected]_interface(CipherAlgorithm)\nclass ARC4(object):\n name = \"RC4\"\n key_sizes = frozenset([40, 56, 64, 80, 128, 160, 192, 256])\n\n def __init__(self, key):\n self.key = _verify_key_size(self, key)\n\n @property\n def key_size(self):\n return len(self.key) * 8\n\n\[email protected]_interface(CipherAlgorithm)\nclass IDEA(object):\n name = \"IDEA\"\n block_size = 64\n key_sizes = frozenset([128])\n\n def __init__(self, key):\n self.key = _verify_key_size(self, key)\n\n @property\n def key_size(self):\n return 
len(self.key) * 8\n\n\[email protected]_interface(BlockCipherAlgorithm)\[email protected]_interface(CipherAlgorithm)\nclass SEED(object):\n name = \"SEED\"\n block_size = 128\n key_sizes = frozenset([128])\n\n def __init__(self, key):\n self.key = _verify_key_size(self, key)\n\n @property\n def key_size(self):\n return len(self.key) * 8\n\n\[email protected]_interface(CipherAlgorithm)\[email protected]_interface(ModeWithNonce)\nclass ChaCha20(object):\n name = \"ChaCha20\"\n key_sizes = frozenset([256])\n\n def __init__(self, key, nonce):\n self.key = _verify_key_size(self, key)\n if not isinstance(nonce, bytes):\n raise TypeError(\"nonce must be bytes\")\n\n if len(nonce) != 16:\n raise ValueError(\"nonce must be 128-bits (16 bytes)\")\n\n self._nonce = nonce\n\n nonce = utils.read_only_property(\"_nonce\")\n\n @property\n def key_size(self):\n return len(self.key) * 8\n", "path": "src/cryptography/hazmat/primitives/ciphers/algorithms.py"}]} | 2,327 | 155 |
gh_patches_debug_6367 | rasdani/github-patches | git_diff | MongoEngine__mongoengine-1769 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
raise StopIteration in generators is deprecated
```
...
/Users/Vic/projects/brain2/brain/utils/user_progress.py:686: DeprecationWarning: generator 'QuerySet._iter_results' raised StopIteration
for action in actions:
/Users/Vic/projects/brain2/brain/utils/user_course_state.py:317: DeprecationWarning: generator 'QuerySet._iter_results' raised StopIteration
PlayedTask(
/Users/Vic/projects/brain2/venv/lib/python3.6/site-packages/mongoengine/dereference.py:33: DeprecationWarning: generator 'QuerySet._iter_results' raised StopIteration
...
```
https://stackoverflow.com/questions/14183803/in-pythons-generators-what-is-the-difference-between-raise-stopiteration-and
https://github.com/MongoEngine/mongoengine/blob/5bdd35464b89dd2def1dcb0464e244322f1bc757/mongoengine/queryset/queryset.py#L95
--- END ISSUE ---
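For context on the deprecation above: under PEP 479 (the default behaviour from Python 3.7), a `StopIteration` that escapes a generator body is converted into a `RuntimeError` rather than silently ending iteration, so a generator should finish with a plain `return`. A minimal sketch of the two patterns — illustrative only, not MongoEngine code:
```python
def ends_with_raise(items):
    for item in items:
        yield item
    # On Python 3.7+ this surfaces as RuntimeError at the call site.
    raise StopIteration


def ends_with_return(items):
    for item in items:
        yield item
    # Returning (or simply falling off the end) stops iteration cleanly.
    return


print(list(ends_with_return([1, 2, 3])))  # [1, 2, 3]
```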
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mongoengine/queryset/queryset.py`
Content:
```
1 import six
2
3 from mongoengine.errors import OperationError
4 from mongoengine.queryset.base import (BaseQuerySet, CASCADE, DENY, DO_NOTHING,
5 NULLIFY, PULL)
6
7 __all__ = ('QuerySet', 'QuerySetNoCache', 'DO_NOTHING', 'NULLIFY', 'CASCADE',
8 'DENY', 'PULL')
9
10 # The maximum number of items to display in a QuerySet.__repr__
11 REPR_OUTPUT_SIZE = 20
12 ITER_CHUNK_SIZE = 100
13
14
15 class QuerySet(BaseQuerySet):
16 """The default queryset, that builds queries and handles a set of results
17 returned from a query.
18
19 Wraps a MongoDB cursor, providing :class:`~mongoengine.Document` objects as
20 the results.
21 """
22
23 _has_more = True
24 _len = None
25 _result_cache = None
26
27 def __iter__(self):
28 """Iteration utilises a results cache which iterates the cursor
29 in batches of ``ITER_CHUNK_SIZE``.
30
31 If ``self._has_more`` the cursor hasn't been exhausted so cache then
32 batch. Otherwise iterate the result_cache.
33 """
34 self._iter = True
35
36 if self._has_more:
37 return self._iter_results()
38
39 # iterating over the cache.
40 return iter(self._result_cache)
41
42 def __len__(self):
43 """Since __len__ is called quite frequently (for example, as part of
44 list(qs)), we populate the result cache and cache the length.
45 """
46 if self._len is not None:
47 return self._len
48
49 # Populate the result cache with *all* of the docs in the cursor
50 if self._has_more:
51 list(self._iter_results())
52
53 # Cache the length of the complete result cache and return it
54 self._len = len(self._result_cache)
55 return self._len
56
57 def __repr__(self):
58 """Provide a string representation of the QuerySet"""
59 if self._iter:
60 return '.. queryset mid-iteration ..'
61
62 self._populate_cache()
63 data = self._result_cache[:REPR_OUTPUT_SIZE + 1]
64 if len(data) > REPR_OUTPUT_SIZE:
65 data[-1] = '...(remaining elements truncated)...'
66 return repr(data)
67
68 def _iter_results(self):
69 """A generator for iterating over the result cache.
70
71 Also populates the cache if there are more possible results to
72 yield. Raises StopIteration when there are no more results.
73 """
74 if self._result_cache is None:
75 self._result_cache = []
76
77 pos = 0
78 while True:
79
80 # For all positions lower than the length of the current result
81 # cache, serve the docs straight from the cache w/o hitting the
82 # database.
83 # XXX it's VERY important to compute the len within the `while`
84 # condition because the result cache might expand mid-iteration
85 # (e.g. if we call len(qs) inside a loop that iterates over the
86 # queryset). Fortunately len(list) is O(1) in Python, so this
87 # doesn't cause performance issues.
88 while pos < len(self._result_cache):
89 yield self._result_cache[pos]
90 pos += 1
91
92 # Raise StopIteration if we already established there were no more
93 # docs in the db cursor.
94 if not self._has_more:
95 raise StopIteration
96
97 # Otherwise, populate more of the cache and repeat.
98 if len(self._result_cache) <= pos:
99 self._populate_cache()
100
101 def _populate_cache(self):
102 """
103 Populates the result cache with ``ITER_CHUNK_SIZE`` more entries
104 (until the cursor is exhausted).
105 """
106 if self._result_cache is None:
107 self._result_cache = []
108
109 # Skip populating the cache if we already established there are no
110 # more docs to pull from the database.
111 if not self._has_more:
112 return
113
114 # Pull in ITER_CHUNK_SIZE docs from the database and store them in
115 # the result cache.
116 try:
117 for _ in six.moves.range(ITER_CHUNK_SIZE):
118 self._result_cache.append(self.next())
119 except StopIteration:
120 # Getting this exception means there are no more docs in the
121 # db cursor. Set _has_more to False so that we can use that
122 # information in other places.
123 self._has_more = False
124
125 def count(self, with_limit_and_skip=False):
126 """Count the selected elements in the query.
127
128 :param with_limit_and_skip (optional): take any :meth:`limit` or
129 :meth:`skip` that has been applied to this cursor into account when
130 getting the count
131 """
132 if with_limit_and_skip is False:
133 return super(QuerySet, self).count(with_limit_and_skip)
134
135 if self._len is None:
136 self._len = super(QuerySet, self).count(with_limit_and_skip)
137
138 return self._len
139
140 def no_cache(self):
141 """Convert to a non-caching queryset
142
143 .. versionadded:: 0.8.3 Convert to non caching queryset
144 """
145 if self._result_cache is not None:
146 raise OperationError('QuerySet already cached')
147
148 return self._clone_into(QuerySetNoCache(self._document,
149 self._collection))
150
151
152 class QuerySetNoCache(BaseQuerySet):
153 """A non caching QuerySet"""
154
155 def cache(self):
156 """Convert to a caching queryset
157
158 .. versionadded:: 0.8.3 Convert to caching queryset
159 """
160 return self._clone_into(QuerySet(self._document, self._collection))
161
162 def __repr__(self):
163 """Provides the string representation of the QuerySet
164
165 .. versionchanged:: 0.6.13 Now doesnt modify the cursor
166 """
167 if self._iter:
168 return '.. queryset mid-iteration ..'
169
170 data = []
171 for _ in six.moves.range(REPR_OUTPUT_SIZE + 1):
172 try:
173 data.append(self.next())
174 except StopIteration:
175 break
176
177 if len(data) > REPR_OUTPUT_SIZE:
178 data[-1] = '...(remaining elements truncated)...'
179
180 self.rewind()
181 return repr(data)
182
183 def __iter__(self):
184 queryset = self
185 if queryset._iter:
186 queryset = self.clone()
187 queryset.rewind()
188 return queryset
189
190
191 class QuerySetNoDeRef(QuerySet):
192 """Special no_dereference QuerySet"""
193
194 def __dereference(items, max_depth=1, instance=None, name=None):
195 return items
196
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mongoengine/queryset/queryset.py b/mongoengine/queryset/queryset.py
--- a/mongoengine/queryset/queryset.py
+++ b/mongoengine/queryset/queryset.py
@@ -92,7 +92,7 @@
# Raise StopIteration if we already established there were no more
# docs in the db cursor.
if not self._has_more:
- raise StopIteration
+ return
# Otherwise, populate more of the cache and repeat.
if len(self._result_cache) <= pos:
| {"golden_diff": "diff --git a/mongoengine/queryset/queryset.py b/mongoengine/queryset/queryset.py\n--- a/mongoengine/queryset/queryset.py\n+++ b/mongoengine/queryset/queryset.py\n@@ -92,7 +92,7 @@\n # Raise StopIteration if we already established there were no more\n # docs in the db cursor.\n if not self._has_more:\n- raise StopIteration\n+ return\n \n # Otherwise, populate more of the cache and repeat.\n if len(self._result_cache) <= pos:\n", "issue": "raise StopIteration in generators is deprecated\n```\r\n...\r\n/Users/Vic/projects/brain2/brain/utils/user_progress.py:686: DeprecationWarning: generator 'QuerySet._iter_results' raised StopIteration\r\n for action in actions:\r\n/Users/Vic/projects/brain2/brain/utils/user_course_state.py:317: DeprecationWarning: generator 'QuerySet._iter_results' raised StopIteration\r\n PlayedTask(\r\n/Users/Vic/projects/brain2/venv/lib/python3.6/site-packages/mongoengine/dereference.py:33: DeprecationWarning: generator 'QuerySet._iter_results' raised StopIteration\r\n...\r\n```\r\nhttps://stackoverflow.com/questions/14183803/in-pythons-generators-what-is-the-difference-between-raise-stopiteration-and\r\n\r\nhttps://github.com/MongoEngine/mongoengine/blob/5bdd35464b89dd2def1dcb0464e244322f1bc757/mongoengine/queryset/queryset.py#L95\n", "before_files": [{"content": "import six\n\nfrom mongoengine.errors import OperationError\nfrom mongoengine.queryset.base import (BaseQuerySet, CASCADE, DENY, DO_NOTHING,\n NULLIFY, PULL)\n\n__all__ = ('QuerySet', 'QuerySetNoCache', 'DO_NOTHING', 'NULLIFY', 'CASCADE',\n 'DENY', 'PULL')\n\n# The maximum number of items to display in a QuerySet.__repr__\nREPR_OUTPUT_SIZE = 20\nITER_CHUNK_SIZE = 100\n\n\nclass QuerySet(BaseQuerySet):\n \"\"\"The default queryset, that builds queries and handles a set of results\n returned from a query.\n\n Wraps a MongoDB cursor, providing :class:`~mongoengine.Document` objects as\n the results.\n \"\"\"\n\n _has_more = True\n _len = None\n _result_cache = None\n\n def __iter__(self):\n \"\"\"Iteration utilises a results cache which iterates the cursor\n in batches of ``ITER_CHUNK_SIZE``.\n\n If ``self._has_more`` the cursor hasn't been exhausted so cache then\n batch. Otherwise iterate the result_cache.\n \"\"\"\n self._iter = True\n\n if self._has_more:\n return self._iter_results()\n\n # iterating over the cache.\n return iter(self._result_cache)\n\n def __len__(self):\n \"\"\"Since __len__ is called quite frequently (for example, as part of\n list(qs)), we populate the result cache and cache the length.\n \"\"\"\n if self._len is not None:\n return self._len\n\n # Populate the result cache with *all* of the docs in the cursor\n if self._has_more:\n list(self._iter_results())\n\n # Cache the length of the complete result cache and return it\n self._len = len(self._result_cache)\n return self._len\n\n def __repr__(self):\n \"\"\"Provide a string representation of the QuerySet\"\"\"\n if self._iter:\n return '.. queryset mid-iteration ..'\n\n self._populate_cache()\n data = self._result_cache[:REPR_OUTPUT_SIZE + 1]\n if len(data) > REPR_OUTPUT_SIZE:\n data[-1] = '...(remaining elements truncated)...'\n return repr(data)\n\n def _iter_results(self):\n \"\"\"A generator for iterating over the result cache.\n\n Also populates the cache if there are more possible results to\n yield. 
Raises StopIteration when there are no more results.\n \"\"\"\n if self._result_cache is None:\n self._result_cache = []\n\n pos = 0\n while True:\n\n # For all positions lower than the length of the current result\n # cache, serve the docs straight from the cache w/o hitting the\n # database.\n # XXX it's VERY important to compute the len within the `while`\n # condition because the result cache might expand mid-iteration\n # (e.g. if we call len(qs) inside a loop that iterates over the\n # queryset). Fortunately len(list) is O(1) in Python, so this\n # doesn't cause performance issues.\n while pos < len(self._result_cache):\n yield self._result_cache[pos]\n pos += 1\n\n # Raise StopIteration if we already established there were no more\n # docs in the db cursor.\n if not self._has_more:\n raise StopIteration\n\n # Otherwise, populate more of the cache and repeat.\n if len(self._result_cache) <= pos:\n self._populate_cache()\n\n def _populate_cache(self):\n \"\"\"\n Populates the result cache with ``ITER_CHUNK_SIZE`` more entries\n (until the cursor is exhausted).\n \"\"\"\n if self._result_cache is None:\n self._result_cache = []\n\n # Skip populating the cache if we already established there are no\n # more docs to pull from the database.\n if not self._has_more:\n return\n\n # Pull in ITER_CHUNK_SIZE docs from the database and store them in\n # the result cache.\n try:\n for _ in six.moves.range(ITER_CHUNK_SIZE):\n self._result_cache.append(self.next())\n except StopIteration:\n # Getting this exception means there are no more docs in the\n # db cursor. Set _has_more to False so that we can use that\n # information in other places.\n self._has_more = False\n\n def count(self, with_limit_and_skip=False):\n \"\"\"Count the selected elements in the query.\n\n :param with_limit_and_skip (optional): take any :meth:`limit` or\n :meth:`skip` that has been applied to this cursor into account when\n getting the count\n \"\"\"\n if with_limit_and_skip is False:\n return super(QuerySet, self).count(with_limit_and_skip)\n\n if self._len is None:\n self._len = super(QuerySet, self).count(with_limit_and_skip)\n\n return self._len\n\n def no_cache(self):\n \"\"\"Convert to a non-caching queryset\n\n .. versionadded:: 0.8.3 Convert to non caching queryset\n \"\"\"\n if self._result_cache is not None:\n raise OperationError('QuerySet already cached')\n\n return self._clone_into(QuerySetNoCache(self._document,\n self._collection))\n\n\nclass QuerySetNoCache(BaseQuerySet):\n \"\"\"A non caching QuerySet\"\"\"\n\n def cache(self):\n \"\"\"Convert to a caching queryset\n\n .. versionadded:: 0.8.3 Convert to caching queryset\n \"\"\"\n return self._clone_into(QuerySet(self._document, self._collection))\n\n def __repr__(self):\n \"\"\"Provides the string representation of the QuerySet\n\n .. versionchanged:: 0.6.13 Now doesnt modify the cursor\n \"\"\"\n if self._iter:\n return '.. 
queryset mid-iteration ..'\n\n data = []\n for _ in six.moves.range(REPR_OUTPUT_SIZE + 1):\n try:\n data.append(self.next())\n except StopIteration:\n break\n\n if len(data) > REPR_OUTPUT_SIZE:\n data[-1] = '...(remaining elements truncated)...'\n\n self.rewind()\n return repr(data)\n\n def __iter__(self):\n queryset = self\n if queryset._iter:\n queryset = self.clone()\n queryset.rewind()\n return queryset\n\n\nclass QuerySetNoDeRef(QuerySet):\n \"\"\"Special no_dereference QuerySet\"\"\"\n\n def __dereference(items, max_depth=1, instance=None, name=None):\n return items\n", "path": "mongoengine/queryset/queryset.py"}], "after_files": [{"content": "import six\n\nfrom mongoengine.errors import OperationError\nfrom mongoengine.queryset.base import (BaseQuerySet, CASCADE, DENY, DO_NOTHING,\n NULLIFY, PULL)\n\n__all__ = ('QuerySet', 'QuerySetNoCache', 'DO_NOTHING', 'NULLIFY', 'CASCADE',\n 'DENY', 'PULL')\n\n# The maximum number of items to display in a QuerySet.__repr__\nREPR_OUTPUT_SIZE = 20\nITER_CHUNK_SIZE = 100\n\n\nclass QuerySet(BaseQuerySet):\n \"\"\"The default queryset, that builds queries and handles a set of results\n returned from a query.\n\n Wraps a MongoDB cursor, providing :class:`~mongoengine.Document` objects as\n the results.\n \"\"\"\n\n _has_more = True\n _len = None\n _result_cache = None\n\n def __iter__(self):\n \"\"\"Iteration utilises a results cache which iterates the cursor\n in batches of ``ITER_CHUNK_SIZE``.\n\n If ``self._has_more`` the cursor hasn't been exhausted so cache then\n batch. Otherwise iterate the result_cache.\n \"\"\"\n self._iter = True\n\n if self._has_more:\n return self._iter_results()\n\n # iterating over the cache.\n return iter(self._result_cache)\n\n def __len__(self):\n \"\"\"Since __len__ is called quite frequently (for example, as part of\n list(qs)), we populate the result cache and cache the length.\n \"\"\"\n if self._len is not None:\n return self._len\n\n # Populate the result cache with *all* of the docs in the cursor\n if self._has_more:\n list(self._iter_results())\n\n # Cache the length of the complete result cache and return it\n self._len = len(self._result_cache)\n return self._len\n\n def __repr__(self):\n \"\"\"Provide a string representation of the QuerySet\"\"\"\n if self._iter:\n return '.. queryset mid-iteration ..'\n\n self._populate_cache()\n data = self._result_cache[:REPR_OUTPUT_SIZE + 1]\n if len(data) > REPR_OUTPUT_SIZE:\n data[-1] = '...(remaining elements truncated)...'\n return repr(data)\n\n def _iter_results(self):\n \"\"\"A generator for iterating over the result cache.\n\n Also populates the cache if there are more possible results to\n yield. Raises StopIteration when there are no more results.\n \"\"\"\n if self._result_cache is None:\n self._result_cache = []\n\n pos = 0\n while True:\n\n # For all positions lower than the length of the current result\n # cache, serve the docs straight from the cache w/o hitting the\n # database.\n # XXX it's VERY important to compute the len within the `while`\n # condition because the result cache might expand mid-iteration\n # (e.g. if we call len(qs) inside a loop that iterates over the\n # queryset). 
Fortunately len(list) is O(1) in Python, so this\n # doesn't cause performance issues.\n while pos < len(self._result_cache):\n yield self._result_cache[pos]\n pos += 1\n\n # Raise StopIteration if we already established there were no more\n # docs in the db cursor.\n if not self._has_more:\n return\n\n # Otherwise, populate more of the cache and repeat.\n if len(self._result_cache) <= pos:\n self._populate_cache()\n\n def _populate_cache(self):\n \"\"\"\n Populates the result cache with ``ITER_CHUNK_SIZE`` more entries\n (until the cursor is exhausted).\n \"\"\"\n if self._result_cache is None:\n self._result_cache = []\n\n # Skip populating the cache if we already established there are no\n # more docs to pull from the database.\n if not self._has_more:\n return\n\n # Pull in ITER_CHUNK_SIZE docs from the database and store them in\n # the result cache.\n try:\n for _ in six.moves.range(ITER_CHUNK_SIZE):\n self._result_cache.append(self.next())\n except StopIteration:\n # Getting this exception means there are no more docs in the\n # db cursor. Set _has_more to False so that we can use that\n # information in other places.\n self._has_more = False\n\n def count(self, with_limit_and_skip=False):\n \"\"\"Count the selected elements in the query.\n\n :param with_limit_and_skip (optional): take any :meth:`limit` or\n :meth:`skip` that has been applied to this cursor into account when\n getting the count\n \"\"\"\n if with_limit_and_skip is False:\n return super(QuerySet, self).count(with_limit_and_skip)\n\n if self._len is None:\n self._len = super(QuerySet, self).count(with_limit_and_skip)\n\n return self._len\n\n def no_cache(self):\n \"\"\"Convert to a non-caching queryset\n\n .. versionadded:: 0.8.3 Convert to non caching queryset\n \"\"\"\n if self._result_cache is not None:\n raise OperationError('QuerySet already cached')\n\n return self._clone_into(QuerySetNoCache(self._document,\n self._collection))\n\n\nclass QuerySetNoCache(BaseQuerySet):\n \"\"\"A non caching QuerySet\"\"\"\n\n def cache(self):\n \"\"\"Convert to a caching queryset\n\n .. versionadded:: 0.8.3 Convert to caching queryset\n \"\"\"\n return self._clone_into(QuerySet(self._document, self._collection))\n\n def __repr__(self):\n \"\"\"Provides the string representation of the QuerySet\n\n .. versionchanged:: 0.6.13 Now doesnt modify the cursor\n \"\"\"\n if self._iter:\n return '.. queryset mid-iteration ..'\n\n data = []\n for _ in six.moves.range(REPR_OUTPUT_SIZE + 1):\n try:\n data.append(self.next())\n except StopIteration:\n break\n\n if len(data) > REPR_OUTPUT_SIZE:\n data[-1] = '...(remaining elements truncated)...'\n\n self.rewind()\n return repr(data)\n\n def __iter__(self):\n queryset = self\n if queryset._iter:\n queryset = self.clone()\n queryset.rewind()\n return queryset\n\n\nclass QuerySetNoDeRef(QuerySet):\n \"\"\"Special no_dereference QuerySet\"\"\"\n\n def __dereference(items, max_depth=1, instance=None, name=None):\n return items\n", "path": "mongoengine/queryset/queryset.py"}]} | 2,407 | 119 |
gh_patches_debug_41711 | rasdani/github-patches | git_diff | python-poetry__poetry-5320 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Upgrading packages from private repositories fails using cert and client-cert
<!--
Hi there! Thank you for discovering and submitting an issue.
Before you submit this; let's make sure of a few things.
Please make sure the following boxes are ticked if they are correct.
If not, please try and fulfill these first.
-->
<!-- Checked checkbox should look like this: [x] -->
- [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version.
- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.
- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).
<!--
Once those are done, if you're able to fill in the following list with your information,
it'd be very helpful to whoever handles the issue.
-->
- **Poetry version**: **1.1.4** <!-- Replace with version -->
## Issue
<!-- Now feel free to write your issue, but please be descriptive! Thanks again 🙌 ❤️ -->
To configure the cert and client-cert I am using:
poetry config certificates.private-pypi.cert /path/to/ca.crt
poetry config certificates.private-pypi.client-cert /path/to/client.pem
... from the custom CA & TLS [instructions](https://python-poetry.org/docs/repositories/#custom-certificate-authority-and-mutual-tls-authentication).
And from pyproject.toml:
[[tool.poetry.source]]
name = "private-pypi"
url = "https://private-pypi/simple"
When doing a `poetry update` to get the latest packages, it fails to get packages from private repositories.
It fails with:
Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify
failed: self signed certificate in certificate chain (_ssl.c:1091)'))
... When I started to debug that error, it looked like the cert and CA were simply not provided in the HTTP request. Here's what I actually saw:
A single successful request where the cert and CA were passed:
GET https://private-pypi/simple/<package_name>/
And then 5 failed retries that had the SSLCertVerificationError:
GET https://private-pypi/packages/<package_name>/<package>-py3-none-any.whl#md5=blah
In those 5 failed requests it looked like the cert and CA were not provided, so in my CI build I am not able to install `<package>`, which happens to be an upgrade. I believe this started with 1.1.0: if I force version 1.0.10, the CI build succeeds and the package installs successfully.
--- END ISSUE ---
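For background on the TLS options involved: `requests` takes the CA bundle via `verify` and the client certificate via `cert`, and both must accompany every request (including retries and package downloads), otherwise verification falls back to the default CA store and fails for a self-signed chain. A minimal sketch of passing them explicitly — the paths are placeholders, not Poetry's actual configuration handling:
```python
import requests

# Placeholder paths standing in for the configured repository certificates:
#   certificates.private-pypi.cert        -> CA bundle used to verify the server
#   certificates.private-pypi.client-cert -> client certificate for mutual TLS
ca_bundle = "/path/to/ca.crt"
client_cert = "/path/to/client.pem"

session = requests.Session()
response = session.get(
    "https://private-pypi/simple/example-package/",
    verify=ca_bundle,
    cert=client_cert,
)
response.raise_for_status()
```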
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/poetry/utils/authenticator.py`
Content:
```
1 from __future__ import annotations
2
3 import logging
4 import time
5 import urllib.parse
6
7 from typing import TYPE_CHECKING
8 from typing import Any
9
10 import requests
11 import requests.auth
12 import requests.exceptions
13
14 from poetry.exceptions import PoetryException
15 from poetry.utils.password_manager import PasswordManager
16
17
18 if TYPE_CHECKING:
19 from cleo.io.io import IO
20
21 from poetry.config.config import Config
22
23
24 logger = logging.getLogger()
25
26
27 class Authenticator:
28 def __init__(self, config: Config, io: IO | None = None) -> None:
29 self._config = config
30 self._io = io
31 self._session = None
32 self._credentials = {}
33 self._password_manager = PasswordManager(self._config)
34
35 def _log(self, message: str, level: str = "debug") -> None:
36 if self._io is not None:
37 self._io.write_line(f"<{level}>{message}</{level}>")
38 else:
39 getattr(logger, level, logger.debug)(message)
40
41 @property
42 def session(self) -> requests.Session:
43 if self._session is None:
44 self._session = requests.Session()
45
46 return self._session
47
48 def __del__(self) -> None:
49 if self._session is not None:
50 self._session.close()
51
52 def request(self, method: str, url: str, **kwargs: Any) -> requests.Response:
53 request = requests.Request(method, url)
54 username, password = self.get_credentials_for_url(url)
55
56 if username is not None and password is not None:
57 request = requests.auth.HTTPBasicAuth(username, password)(request)
58
59 session = self.session
60 prepared_request = session.prepare_request(request)
61
62 proxies = kwargs.get("proxies", {})
63 stream = kwargs.get("stream")
64 verify = kwargs.get("verify")
65 cert = kwargs.get("cert")
66
67 settings = session.merge_environment_settings(
68 prepared_request.url, proxies, stream, verify, cert
69 )
70
71 # Send the request.
72 send_kwargs = {
73 "timeout": kwargs.get("timeout"),
74 "allow_redirects": kwargs.get("allow_redirects", True),
75 }
76 send_kwargs.update(settings)
77
78 attempt = 0
79
80 while True:
81 is_last_attempt = attempt >= 5
82 try:
83 resp = session.send(prepared_request, **send_kwargs)
84 except (requests.exceptions.ConnectionError, OSError) as e:
85 if is_last_attempt:
86 raise e
87 else:
88 if resp.status_code not in [502, 503, 504] or is_last_attempt:
89 resp.raise_for_status()
90 return resp
91
92 if not is_last_attempt:
93 attempt += 1
94 delay = 0.5 * attempt
95 self._log(f"Retrying HTTP request in {delay} seconds.", level="debug")
96 time.sleep(delay)
97 continue
98
99 # this should never really be hit under any sane circumstance
100 raise PoetryException("Failed HTTP {} request", method.upper())
101
102 def get_credentials_for_url(self, url: str) -> tuple[str | None, str | None]:
103 parsed_url = urllib.parse.urlsplit(url)
104
105 netloc = parsed_url.netloc
106
107 credentials = self._credentials.get(netloc, (None, None))
108
109 if credentials == (None, None):
110 if "@" not in netloc:
111 credentials = self._get_credentials_for_netloc(netloc)
112 else:
113 # Split from the right because that's how urllib.parse.urlsplit()
114 # behaves if more than one @ is present (which can be checked using
115 # the password attribute of urlsplit()'s return value).
116 auth, netloc = netloc.rsplit("@", 1)
117 # Split from the left because that's how urllib.parse.urlsplit()
118 # behaves if more than one : is present (which again can be checked
119 # using the password attribute of the return value)
120 credentials = auth.split(":", 1) if ":" in auth else (auth, None)
121 credentials = tuple(
122 None if x is None else urllib.parse.unquote(x) for x in credentials
123 )
124
125 if credentials[0] is not None or credentials[1] is not None:
126 credentials = (credentials[0] or "", credentials[1] or "")
127
128 self._credentials[netloc] = credentials
129
130 return credentials[0], credentials[1]
131
132 def get_pypi_token(self, name: str) -> str:
133 return self._password_manager.get_pypi_token(name)
134
135 def get_http_auth(self, name: str) -> dict[str, str] | None:
136 return self._get_http_auth(name, None)
137
138 def _get_http_auth(self, name: str, netloc: str | None) -> dict[str, str] | None:
139 if name == "pypi":
140 url = "https://upload.pypi.org/legacy/"
141 else:
142 url = self._config.get(f"repositories.{name}.url")
143 if not url:
144 return None
145
146 parsed_url = urllib.parse.urlsplit(url)
147
148 if netloc is None or netloc == parsed_url.netloc:
149 auth = self._password_manager.get_http_auth(name)
150
151 if auth is None or auth["password"] is None:
152 username = auth["username"] if auth else None
153 auth = self._get_credentials_for_netloc_from_keyring(
154 url, parsed_url.netloc, username
155 )
156
157 return auth
158
159 def _get_credentials_for_netloc(self, netloc: str) -> tuple[str | None, str | None]:
160 for repository_name in self._config.get("repositories", []):
161 auth = self._get_http_auth(repository_name, netloc)
162
163 if auth is None:
164 continue
165
166 return auth["username"], auth["password"]
167
168 return None, None
169
170 def _get_credentials_for_netloc_from_keyring(
171 self, url: str, netloc: str, username: str | None
172 ) -> dict[str, str] | None:
173 import keyring
174
175 cred = keyring.get_credential(url, username)
176 if cred is not None:
177 return {
178 "username": cred.username,
179 "password": cred.password,
180 }
181
182 cred = keyring.get_credential(netloc, username)
183 if cred is not None:
184 return {
185 "username": cred.username,
186 "password": cred.password,
187 }
188
189 if username:
190 return {
191 "username": username,
192 "password": None,
193 }
194
195 return None
196
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/poetry/utils/authenticator.py b/src/poetry/utils/authenticator.py
--- a/src/poetry/utils/authenticator.py
+++ b/src/poetry/utils/authenticator.py
@@ -6,16 +6,21 @@
from typing import TYPE_CHECKING
from typing import Any
+from typing import Iterator
import requests
import requests.auth
import requests.exceptions
from poetry.exceptions import PoetryException
+from poetry.utils.helpers import get_cert
+from poetry.utils.helpers import get_client_cert
from poetry.utils.password_manager import PasswordManager
if TYPE_CHECKING:
+ from pathlib import Path
+
from cleo.io.io import IO
from poetry.config.config import Config
@@ -30,6 +35,7 @@
self._io = io
self._session = None
self._credentials = {}
+ self._certs = {}
self._password_manager = PasswordManager(self._config)
def _log(self, message: str, level: str = "debug") -> None:
@@ -61,8 +67,16 @@
proxies = kwargs.get("proxies", {})
stream = kwargs.get("stream")
- verify = kwargs.get("verify")
- cert = kwargs.get("cert")
+
+ certs = self.get_certs_for_url(url)
+ verify = kwargs.get("verify") or certs.get("verify")
+ cert = kwargs.get("cert") or certs.get("cert")
+
+ if cert is not None:
+ cert = str(cert)
+
+ if verify is not None:
+ verify = str(verify)
settings = session.merge_environment_settings(
prepared_request.url, proxies, stream, verify, cert
@@ -157,7 +171,7 @@
return auth
def _get_credentials_for_netloc(self, netloc: str) -> tuple[str | None, str | None]:
- for repository_name in self._config.get("repositories", []):
+ for repository_name, _ in self._get_repository_netlocs():
auth = self._get_http_auth(repository_name, netloc)
if auth is None:
@@ -167,6 +181,22 @@
return None, None
+ def get_certs_for_url(self, url: str) -> dict[str, Path | None]:
+ parsed_url = urllib.parse.urlsplit(url)
+
+ netloc = parsed_url.netloc
+
+ return self._certs.setdefault(
+ netloc,
+ self._get_certs_for_netloc_from_config(netloc),
+ )
+
+ def _get_repository_netlocs(self) -> Iterator[tuple[str, str]]:
+ for repository_name in self._config.get("repositories", []):
+ url = self._config.get(f"repositories.{repository_name}.url")
+ parsed_url = urllib.parse.urlsplit(url)
+ yield repository_name, parsed_url.netloc
+
def _get_credentials_for_netloc_from_keyring(
self, url: str, netloc: str, username: str | None
) -> dict[str, str] | None:
@@ -193,3 +223,14 @@
}
return None
+
+ def _get_certs_for_netloc_from_config(self, netloc: str) -> dict[str, Path | None]:
+ certs = {"cert": None, "verify": None}
+
+ for repository_name, repository_netloc in self._get_repository_netlocs():
+ if netloc == repository_netloc:
+ certs["cert"] = get_client_cert(self._config, repository_name)
+ certs["verify"] = get_cert(self._config, repository_name)
+ break
+
+ return certs
| {"golden_diff": "diff --git a/src/poetry/utils/authenticator.py b/src/poetry/utils/authenticator.py\n--- a/src/poetry/utils/authenticator.py\n+++ b/src/poetry/utils/authenticator.py\n@@ -6,16 +6,21 @@\n \n from typing import TYPE_CHECKING\n from typing import Any\n+from typing import Iterator\n \n import requests\n import requests.auth\n import requests.exceptions\n \n from poetry.exceptions import PoetryException\n+from poetry.utils.helpers import get_cert\n+from poetry.utils.helpers import get_client_cert\n from poetry.utils.password_manager import PasswordManager\n \n \n if TYPE_CHECKING:\n+ from pathlib import Path\n+\n from cleo.io.io import IO\n \n from poetry.config.config import Config\n@@ -30,6 +35,7 @@\n self._io = io\n self._session = None\n self._credentials = {}\n+ self._certs = {}\n self._password_manager = PasswordManager(self._config)\n \n def _log(self, message: str, level: str = \"debug\") -> None:\n@@ -61,8 +67,16 @@\n \n proxies = kwargs.get(\"proxies\", {})\n stream = kwargs.get(\"stream\")\n- verify = kwargs.get(\"verify\")\n- cert = kwargs.get(\"cert\")\n+\n+ certs = self.get_certs_for_url(url)\n+ verify = kwargs.get(\"verify\") or certs.get(\"verify\")\n+ cert = kwargs.get(\"cert\") or certs.get(\"cert\")\n+\n+ if cert is not None:\n+ cert = str(cert)\n+\n+ if verify is not None:\n+ verify = str(verify)\n \n settings = session.merge_environment_settings(\n prepared_request.url, proxies, stream, verify, cert\n@@ -157,7 +171,7 @@\n return auth\n \n def _get_credentials_for_netloc(self, netloc: str) -> tuple[str | None, str | None]:\n- for repository_name in self._config.get(\"repositories\", []):\n+ for repository_name, _ in self._get_repository_netlocs():\n auth = self._get_http_auth(repository_name, netloc)\n \n if auth is None:\n@@ -167,6 +181,22 @@\n \n return None, None\n \n+ def get_certs_for_url(self, url: str) -> dict[str, Path | None]:\n+ parsed_url = urllib.parse.urlsplit(url)\n+\n+ netloc = parsed_url.netloc\n+\n+ return self._certs.setdefault(\n+ netloc,\n+ self._get_certs_for_netloc_from_config(netloc),\n+ )\n+\n+ def _get_repository_netlocs(self) -> Iterator[tuple[str, str]]:\n+ for repository_name in self._config.get(\"repositories\", []):\n+ url = self._config.get(f\"repositories.{repository_name}.url\")\n+ parsed_url = urllib.parse.urlsplit(url)\n+ yield repository_name, parsed_url.netloc\n+\n def _get_credentials_for_netloc_from_keyring(\n self, url: str, netloc: str, username: str | None\n ) -> dict[str, str] | None:\n@@ -193,3 +223,14 @@\n }\n \n return None\n+\n+ def _get_certs_for_netloc_from_config(self, netloc: str) -> dict[str, Path | None]:\n+ certs = {\"cert\": None, \"verify\": None}\n+\n+ for repository_name, repository_netloc in self._get_repository_netlocs():\n+ if netloc == repository_netloc:\n+ certs[\"cert\"] = get_client_cert(self._config, repository_name)\n+ certs[\"verify\"] = get_cert(self._config, repository_name)\n+ break\n+\n+ return certs\n", "issue": "Upgrading packages from private repositories fails using cert and client-cert \n<!--\r\n Hi there! 
Thank you for discovering and submitting an issue.\r\n\r\n Before you submit this; let's make sure of a few things.\r\n Please make sure the following boxes are ticked if they are correct.\r\n If not, please try and fulfill these first.\r\n-->\r\n\r\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n<!--\r\n Once those are done, if you're able to fill in the following list with your information,\r\n it'd be very helpful to whoever handles the issue.\r\n-->\r\n\r\n- **Poetry version**: **1.1.4** <!-- Replace with version -->\r\n\r\n## Issue\r\n<!-- Now feel free to write your issue, but please be descriptive! Thanks again \ud83d\ude4c \u2764\ufe0f -->\r\n\r\nTo configure the cert and client-cert I am using:\r\n\r\n poetry config certificates.private-pypi.cert /path/to/ca.crt\r\n poetry config certificates.private-pypi.client-cert /path/to/client.pem\r\n\r\n... from the custom CA & TLS [instructions](https://python-poetry.org/docs/repositories/#custom-certificate-authority-and-mutual-tls-authentication).\r\n\r\nAnd from pyproject.toml:\r\n\r\n [[tool.poetry.source]]\r\n name = \"private-pypi\"\r\n url = \"https://private-pypi/simple\"\r\n\r\nWhen doing a `poetry update` to get the latest packages, it fails to get packages from private repositories. \r\n\r\nIt fails with:\r\n\r\n Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify\r\n failed: self signed certificate in certificate chain (_ssl.c:1091)'))\r\n\r\n... I started to debug that error it looked like the cert and CA were simply not provided in the http request. Actually here's what I saw:\r\n\r\nA single successful request where the cert and CA were passed:\r\n\r\n GET https://private-pypi/simple/<package_name>/\r\n\r\nAnd then 5 failed retries that had the SSLCertVerificationError:\r\n\r\n GET https://private-pypi/packages/<package_name>/<package>-py3-none-any.whl#md5=blah\r\n\r\nAnd in those 5 requests it looked like the cert and CA were not provided. So in my CI build I am not able to install `<package>` which happens to be an upgrade. 
I believe this started to happen with 1.1.0 as I can force version 1.0.10 and then the CI build will succeed and the package will install successfully.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport logging\nimport time\nimport urllib.parse\n\nfrom typing import TYPE_CHECKING\nfrom typing import Any\n\nimport requests\nimport requests.auth\nimport requests.exceptions\n\nfrom poetry.exceptions import PoetryException\nfrom poetry.utils.password_manager import PasswordManager\n\n\nif TYPE_CHECKING:\n from cleo.io.io import IO\n\n from poetry.config.config import Config\n\n\nlogger = logging.getLogger()\n\n\nclass Authenticator:\n def __init__(self, config: Config, io: IO | None = None) -> None:\n self._config = config\n self._io = io\n self._session = None\n self._credentials = {}\n self._password_manager = PasswordManager(self._config)\n\n def _log(self, message: str, level: str = \"debug\") -> None:\n if self._io is not None:\n self._io.write_line(f\"<{level}>{message}</{level}>\")\n else:\n getattr(logger, level, logger.debug)(message)\n\n @property\n def session(self) -> requests.Session:\n if self._session is None:\n self._session = requests.Session()\n\n return self._session\n\n def __del__(self) -> None:\n if self._session is not None:\n self._session.close()\n\n def request(self, method: str, url: str, **kwargs: Any) -> requests.Response:\n request = requests.Request(method, url)\n username, password = self.get_credentials_for_url(url)\n\n if username is not None and password is not None:\n request = requests.auth.HTTPBasicAuth(username, password)(request)\n\n session = self.session\n prepared_request = session.prepare_request(request)\n\n proxies = kwargs.get(\"proxies\", {})\n stream = kwargs.get(\"stream\")\n verify = kwargs.get(\"verify\")\n cert = kwargs.get(\"cert\")\n\n settings = session.merge_environment_settings(\n prepared_request.url, proxies, stream, verify, cert\n )\n\n # Send the request.\n send_kwargs = {\n \"timeout\": kwargs.get(\"timeout\"),\n \"allow_redirects\": kwargs.get(\"allow_redirects\", True),\n }\n send_kwargs.update(settings)\n\n attempt = 0\n\n while True:\n is_last_attempt = attempt >= 5\n try:\n resp = session.send(prepared_request, **send_kwargs)\n except (requests.exceptions.ConnectionError, OSError) as e:\n if is_last_attempt:\n raise e\n else:\n if resp.status_code not in [502, 503, 504] or is_last_attempt:\n resp.raise_for_status()\n return resp\n\n if not is_last_attempt:\n attempt += 1\n delay = 0.5 * attempt\n self._log(f\"Retrying HTTP request in {delay} seconds.\", level=\"debug\")\n time.sleep(delay)\n continue\n\n # this should never really be hit under any sane circumstance\n raise PoetryException(\"Failed HTTP {} request\", method.upper())\n\n def get_credentials_for_url(self, url: str) -> tuple[str | None, str | None]:\n parsed_url = urllib.parse.urlsplit(url)\n\n netloc = parsed_url.netloc\n\n credentials = self._credentials.get(netloc, (None, None))\n\n if credentials == (None, None):\n if \"@\" not in netloc:\n credentials = self._get_credentials_for_netloc(netloc)\n else:\n # Split from the right because that's how urllib.parse.urlsplit()\n # behaves if more than one @ is present (which can be checked using\n # the password attribute of urlsplit()'s return value).\n auth, netloc = netloc.rsplit(\"@\", 1)\n # Split from the left because that's how urllib.parse.urlsplit()\n # behaves if more than one : is present (which again can be checked\n # using the password attribute of the return value)\n 
credentials = auth.split(\":\", 1) if \":\" in auth else (auth, None)\n credentials = tuple(\n None if x is None else urllib.parse.unquote(x) for x in credentials\n )\n\n if credentials[0] is not None or credentials[1] is not None:\n credentials = (credentials[0] or \"\", credentials[1] or \"\")\n\n self._credentials[netloc] = credentials\n\n return credentials[0], credentials[1]\n\n def get_pypi_token(self, name: str) -> str:\n return self._password_manager.get_pypi_token(name)\n\n def get_http_auth(self, name: str) -> dict[str, str] | None:\n return self._get_http_auth(name, None)\n\n def _get_http_auth(self, name: str, netloc: str | None) -> dict[str, str] | None:\n if name == \"pypi\":\n url = \"https://upload.pypi.org/legacy/\"\n else:\n url = self._config.get(f\"repositories.{name}.url\")\n if not url:\n return None\n\n parsed_url = urllib.parse.urlsplit(url)\n\n if netloc is None or netloc == parsed_url.netloc:\n auth = self._password_manager.get_http_auth(name)\n\n if auth is None or auth[\"password\"] is None:\n username = auth[\"username\"] if auth else None\n auth = self._get_credentials_for_netloc_from_keyring(\n url, parsed_url.netloc, username\n )\n\n return auth\n\n def _get_credentials_for_netloc(self, netloc: str) -> tuple[str | None, str | None]:\n for repository_name in self._config.get(\"repositories\", []):\n auth = self._get_http_auth(repository_name, netloc)\n\n if auth is None:\n continue\n\n return auth[\"username\"], auth[\"password\"]\n\n return None, None\n\n def _get_credentials_for_netloc_from_keyring(\n self, url: str, netloc: str, username: str | None\n ) -> dict[str, str] | None:\n import keyring\n\n cred = keyring.get_credential(url, username)\n if cred is not None:\n return {\n \"username\": cred.username,\n \"password\": cred.password,\n }\n\n cred = keyring.get_credential(netloc, username)\n if cred is not None:\n return {\n \"username\": cred.username,\n \"password\": cred.password,\n }\n\n if username:\n return {\n \"username\": username,\n \"password\": None,\n }\n\n return None\n", "path": "src/poetry/utils/authenticator.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport logging\nimport time\nimport urllib.parse\n\nfrom typing import TYPE_CHECKING\nfrom typing import Any\nfrom typing import Iterator\n\nimport requests\nimport requests.auth\nimport requests.exceptions\n\nfrom poetry.exceptions import PoetryException\nfrom poetry.utils.helpers import get_cert\nfrom poetry.utils.helpers import get_client_cert\nfrom poetry.utils.password_manager import PasswordManager\n\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n from cleo.io.io import IO\n\n from poetry.config.config import Config\n\n\nlogger = logging.getLogger()\n\n\nclass Authenticator:\n def __init__(self, config: Config, io: IO | None = None) -> None:\n self._config = config\n self._io = io\n self._session = None\n self._credentials = {}\n self._certs = {}\n self._password_manager = PasswordManager(self._config)\n\n def _log(self, message: str, level: str = \"debug\") -> None:\n if self._io is not None:\n self._io.write_line(f\"<{level}>{message}</{level}>\")\n else:\n getattr(logger, level, logger.debug)(message)\n\n @property\n def session(self) -> requests.Session:\n if self._session is None:\n self._session = requests.Session()\n\n return self._session\n\n def __del__(self) -> None:\n if self._session is not None:\n self._session.close()\n\n def request(self, method: str, url: str, **kwargs: Any) -> requests.Response:\n request = requests.Request(method, 
url)\n username, password = self.get_credentials_for_url(url)\n\n if username is not None and password is not None:\n request = requests.auth.HTTPBasicAuth(username, password)(request)\n\n session = self.session\n prepared_request = session.prepare_request(request)\n\n proxies = kwargs.get(\"proxies\", {})\n stream = kwargs.get(\"stream\")\n\n certs = self.get_certs_for_url(url)\n verify = kwargs.get(\"verify\") or certs.get(\"verify\")\n cert = kwargs.get(\"cert\") or certs.get(\"cert\")\n\n if cert is not None:\n cert = str(cert)\n\n if verify is not None:\n verify = str(verify)\n\n settings = session.merge_environment_settings(\n prepared_request.url, proxies, stream, verify, cert\n )\n\n # Send the request.\n send_kwargs = {\n \"timeout\": kwargs.get(\"timeout\"),\n \"allow_redirects\": kwargs.get(\"allow_redirects\", True),\n }\n send_kwargs.update(settings)\n\n attempt = 0\n\n while True:\n is_last_attempt = attempt >= 5\n try:\n resp = session.send(prepared_request, **send_kwargs)\n except (requests.exceptions.ConnectionError, OSError) as e:\n if is_last_attempt:\n raise e\n else:\n if resp.status_code not in [502, 503, 504] or is_last_attempt:\n resp.raise_for_status()\n return resp\n\n if not is_last_attempt:\n attempt += 1\n delay = 0.5 * attempt\n self._log(f\"Retrying HTTP request in {delay} seconds.\", level=\"debug\")\n time.sleep(delay)\n continue\n\n # this should never really be hit under any sane circumstance\n raise PoetryException(\"Failed HTTP {} request\", method.upper())\n\n def get_credentials_for_url(self, url: str) -> tuple[str | None, str | None]:\n parsed_url = urllib.parse.urlsplit(url)\n\n netloc = parsed_url.netloc\n\n credentials = self._credentials.get(netloc, (None, None))\n\n if credentials == (None, None):\n if \"@\" not in netloc:\n credentials = self._get_credentials_for_netloc(netloc)\n else:\n # Split from the right because that's how urllib.parse.urlsplit()\n # behaves if more than one @ is present (which can be checked using\n # the password attribute of urlsplit()'s return value).\n auth, netloc = netloc.rsplit(\"@\", 1)\n # Split from the left because that's how urllib.parse.urlsplit()\n # behaves if more than one : is present (which again can be checked\n # using the password attribute of the return value)\n credentials = auth.split(\":\", 1) if \":\" in auth else (auth, None)\n credentials = tuple(\n None if x is None else urllib.parse.unquote(x) for x in credentials\n )\n\n if credentials[0] is not None or credentials[1] is not None:\n credentials = (credentials[0] or \"\", credentials[1] or \"\")\n\n self._credentials[netloc] = credentials\n\n return credentials[0], credentials[1]\n\n def get_pypi_token(self, name: str) -> str:\n return self._password_manager.get_pypi_token(name)\n\n def get_http_auth(self, name: str) -> dict[str, str] | None:\n return self._get_http_auth(name, None)\n\n def _get_http_auth(self, name: str, netloc: str | None) -> dict[str, str] | None:\n if name == \"pypi\":\n url = \"https://upload.pypi.org/legacy/\"\n else:\n url = self._config.get(f\"repositories.{name}.url\")\n if not url:\n return None\n\n parsed_url = urllib.parse.urlsplit(url)\n\n if netloc is None or netloc == parsed_url.netloc:\n auth = self._password_manager.get_http_auth(name)\n\n if auth is None or auth[\"password\"] is None:\n username = auth[\"username\"] if auth else None\n auth = self._get_credentials_for_netloc_from_keyring(\n url, parsed_url.netloc, username\n )\n\n return auth\n\n def _get_credentials_for_netloc(self, netloc: str) -> 
tuple[str | None, str | None]:\n for repository_name, _ in self._get_repository_netlocs():\n auth = self._get_http_auth(repository_name, netloc)\n\n if auth is None:\n continue\n\n return auth[\"username\"], auth[\"password\"]\n\n return None, None\n\n def get_certs_for_url(self, url: str) -> dict[str, Path | None]:\n parsed_url = urllib.parse.urlsplit(url)\n\n netloc = parsed_url.netloc\n\n return self._certs.setdefault(\n netloc,\n self._get_certs_for_netloc_from_config(netloc),\n )\n\n def _get_repository_netlocs(self) -> Iterator[tuple[str, str]]:\n for repository_name in self._config.get(\"repositories\", []):\n url = self._config.get(f\"repositories.{repository_name}.url\")\n parsed_url = urllib.parse.urlsplit(url)\n yield repository_name, parsed_url.netloc\n\n def _get_credentials_for_netloc_from_keyring(\n self, url: str, netloc: str, username: str | None\n ) -> dict[str, str] | None:\n import keyring\n\n cred = keyring.get_credential(url, username)\n if cred is not None:\n return {\n \"username\": cred.username,\n \"password\": cred.password,\n }\n\n cred = keyring.get_credential(netloc, username)\n if cred is not None:\n return {\n \"username\": cred.username,\n \"password\": cred.password,\n }\n\n if username:\n return {\n \"username\": username,\n \"password\": None,\n }\n\n return None\n\n def _get_certs_for_netloc_from_config(self, netloc: str) -> dict[str, Path | None]:\n certs = {\"cert\": None, \"verify\": None}\n\n for repository_name, repository_netloc in self._get_repository_netlocs():\n if netloc == repository_netloc:\n certs[\"cert\"] = get_client_cert(self._config, repository_name)\n certs[\"verify\"] = get_cert(self._config, repository_name)\n break\n\n return certs\n", "path": "src/poetry/utils/authenticator.py"}]} | 2,767 | 816 |
gh_patches_debug_7247 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1299 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
core.hooksPath being set makes it not possible to install
I have a laptop which has some mandatory global settings for git, and I have not been able to enable pre-commit. I also tried doing
```
git config --global init.templateDir ~/.git-template
pre-commit init-templatedir ~/.git-template
```
But still get the message
```
[ERROR] Cowardly refusing to install hooks with `core.hooksPath` set.
hint: `git config --unset-all core.hooksPath`
```
Are there any workarounds for this?
--- END ISSUE ---
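As background on the refusal above: pre-commit declines to install its hook script when git's `core.hooksPath` redirects hooks elsewhere, since the installed script would never run. A rough, self-contained sketch of checking for that setting — the helper name is invented for illustration:
```python
import subprocess


def core_hookpaths_set() -> bool:
    """Return True if `core.hooksPath` is set in any applicable git config."""
    result = subprocess.run(
        ("git", "config", "--get", "core.hooksPath"),
        capture_output=True,
        text=True,
    )
    # `git config --get` exits non-zero when the key is not set anywhere.
    return result.returncode == 0 and bool(result.stdout.strip())


if core_hookpaths_set():
    print("core.hooksPath is set; `pre-commit install` will refuse to proceed.")
```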
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/commands/install_uninstall.py`
Content:
```
1 import itertools
2 import logging
3 import os.path
4 import shutil
5 import sys
6 from typing import Optional
7 from typing import Sequence
8 from typing import Tuple
9
10 from pre_commit import git
11 from pre_commit import output
12 from pre_commit.clientlib import load_config
13 from pre_commit.repository import all_hooks
14 from pre_commit.repository import install_hook_envs
15 from pre_commit.store import Store
16 from pre_commit.util import make_executable
17 from pre_commit.util import resource_text
18
19
20 logger = logging.getLogger(__name__)
21
22 # This is used to identify the hook file we install
23 PRIOR_HASHES = (
24 '4d9958c90bc262f47553e2c073f14cfe',
25 'd8ee923c46731b42cd95cc869add4062',
26 '49fd668cb42069aa1b6048464be5d395',
27 '79f09a650522a87b0da915d0d983b2de',
28 'e358c9dae00eac5d06b38dfdb1e33a8c',
29 )
30 CURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'
31 TEMPLATE_START = '# start templated\n'
32 TEMPLATE_END = '# end templated\n'
33
34
35 def _hook_paths(
36 hook_type: str,
37 git_dir: Optional[str] = None,
38 ) -> Tuple[str, str]:
39 git_dir = git_dir if git_dir is not None else git.get_git_dir()
40 pth = os.path.join(git_dir, 'hooks', hook_type)
41 return pth, f'{pth}.legacy'
42
43
44 def is_our_script(filename: str) -> bool:
45 if not os.path.exists(filename): # pragma: windows no cover (symlink)
46 return False
47 with open(filename) as f:
48 contents = f.read()
49 return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)
50
51
52 def shebang() -> str:
53 if sys.platform == 'win32':
54 py = 'python'
55 else:
56 # Homebrew/homebrew-core#35825: be more timid about appropriate `PATH`
57 path_choices = [p for p in os.defpath.split(os.pathsep) if p]
58 exe_choices = [
59 f'python{sys.version_info[0]}.{sys.version_info[1]}',
60 f'python{sys.version_info[0]}',
61 ]
62 for path, exe in itertools.product(path_choices, exe_choices):
63 if os.access(os.path.join(path, exe), os.X_OK):
64 py = exe
65 break
66 else:
67 py = 'python'
68 return f'#!/usr/bin/env {py}'
69
70
71 def _install_hook_script(
72 config_file: str,
73 hook_type: str,
74 overwrite: bool = False,
75 skip_on_missing_config: bool = False,
76 git_dir: Optional[str] = None,
77 ) -> None:
78 hook_path, legacy_path = _hook_paths(hook_type, git_dir=git_dir)
79
80 os.makedirs(os.path.dirname(hook_path), exist_ok=True)
81
82 # If we have an existing hook, move it to pre-commit.legacy
83 if os.path.lexists(hook_path) and not is_our_script(hook_path):
84 shutil.move(hook_path, legacy_path)
85
86 # If we specify overwrite, we simply delete the legacy file
87 if overwrite and os.path.exists(legacy_path):
88 os.remove(legacy_path)
89 elif os.path.exists(legacy_path):
90 output.write_line(
91 f'Running in migration mode with existing hooks at {legacy_path}\n'
92 f'Use -f to use only pre-commit.',
93 )
94
95 args = ['hook-impl', f'--config={config_file}', f'--hook-type={hook_type}']
96 if skip_on_missing_config:
97 args.append('--skip-on-missing-config')
98 params = {'INSTALL_PYTHON': sys.executable, 'ARGS': args}
99
100 with open(hook_path, 'w') as hook_file:
101 contents = resource_text('hook-tmpl')
102 before, rest = contents.split(TEMPLATE_START)
103 to_template, after = rest.split(TEMPLATE_END)
104
105 before = before.replace('#!/usr/bin/env python3', shebang())
106
107 hook_file.write(before + TEMPLATE_START)
108 for line in to_template.splitlines():
109 var = line.split()[0]
110 hook_file.write(f'{var} = {params[var]!r}\n')
111 hook_file.write(TEMPLATE_END + after)
112 make_executable(hook_path)
113
114 output.write_line(f'pre-commit installed at {hook_path}')
115
116
117 def install(
118 config_file: str,
119 store: Store,
120 hook_types: Sequence[str],
121 overwrite: bool = False,
122 hooks: bool = False,
123 skip_on_missing_config: bool = False,
124 git_dir: Optional[str] = None,
125 ) -> int:
126 if git.has_core_hookpaths_set():
127 logger.error(
128 'Cowardly refusing to install hooks with `core.hooksPath` set.\n'
129 'hint: `git config --unset-all core.hooksPath`',
130 )
131 return 1
132
133 for hook_type in hook_types:
134 _install_hook_script(
135 config_file, hook_type,
136 overwrite=overwrite,
137 skip_on_missing_config=skip_on_missing_config,
138 git_dir=git_dir,
139 )
140
141 if hooks:
142 install_hooks(config_file, store)
143
144 return 0
145
146
147 def install_hooks(config_file: str, store: Store) -> int:
148 install_hook_envs(all_hooks(load_config(config_file), store), store)
149 return 0
150
151
152 def _uninstall_hook_script(hook_type: str) -> None:
153 hook_path, legacy_path = _hook_paths(hook_type)
154
155 # If our file doesn't exist or it isn't ours, gtfo.
156 if not os.path.exists(hook_path) or not is_our_script(hook_path):
157 return
158
159 os.remove(hook_path)
160 output.write_line(f'{hook_type} uninstalled')
161
162 if os.path.exists(legacy_path):
163 os.rename(legacy_path, hook_path)
164 output.write_line(f'Restored previous hooks to {hook_path}')
165
166
167 def uninstall(hook_types: Sequence[str]) -> int:
168 for hook_type in hook_types:
169 _uninstall_hook_script(hook_type)
170 return 0
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py
--- a/pre_commit/commands/install_uninstall.py
+++ b/pre_commit/commands/install_uninstall.py
@@ -123,7 +123,7 @@
skip_on_missing_config: bool = False,
git_dir: Optional[str] = None,
) -> int:
- if git.has_core_hookpaths_set():
+ if git_dir is None and git.has_core_hookpaths_set():
logger.error(
'Cowardly refusing to install hooks with `core.hooksPath` set.\n'
'hint: `git config --unset-all core.hooksPath`',
| {"golden_diff": "diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py\n--- a/pre_commit/commands/install_uninstall.py\n+++ b/pre_commit/commands/install_uninstall.py\n@@ -123,7 +123,7 @@\n skip_on_missing_config: bool = False,\n git_dir: Optional[str] = None,\n ) -> int:\n- if git.has_core_hookpaths_set():\n+ if git_dir is None and git.has_core_hookpaths_set():\n logger.error(\n 'Cowardly refusing to install hooks with `core.hooksPath` set.\\n'\n 'hint: `git config --unset-all core.hooksPath`',\n", "issue": "core.hooksPath being set makes it not possible to install\nI have a laptop which has some mandaotry global settings for git, and I have not been able to enable pre-commit. I also tried doing\r\n```\r\ngit config --global init.templateDir ~/.git-template\r\npre-commit init-templatedir ~/.git-template\r\n```\r\nBut still get the message\r\n```\r\n[ERROR] Cowardly refusing to install hooks with `core.hooksPath` set.\r\nhint: `git config --unset-all core.hooksPath`\r\n```\r\n\r\nIs there any work arounds for this?\n", "before_files": [{"content": "import itertools\nimport logging\nimport os.path\nimport shutil\nimport sys\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.clientlib import load_config\nfrom pre_commit.repository import all_hooks\nfrom pre_commit.repository import install_hook_envs\nfrom pre_commit.store import Store\nfrom pre_commit.util import make_executable\nfrom pre_commit.util import resource_text\n\n\nlogger = logging.getLogger(__name__)\n\n# This is used to identify the hook file we install\nPRIOR_HASHES = (\n '4d9958c90bc262f47553e2c073f14cfe',\n 'd8ee923c46731b42cd95cc869add4062',\n '49fd668cb42069aa1b6048464be5d395',\n '79f09a650522a87b0da915d0d983b2de',\n 'e358c9dae00eac5d06b38dfdb1e33a8c',\n)\nCURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'\nTEMPLATE_START = '# start templated\\n'\nTEMPLATE_END = '# end templated\\n'\n\n\ndef _hook_paths(\n hook_type: str,\n git_dir: Optional[str] = None,\n) -> Tuple[str, str]:\n git_dir = git_dir if git_dir is not None else git.get_git_dir()\n pth = os.path.join(git_dir, 'hooks', hook_type)\n return pth, f'{pth}.legacy'\n\n\ndef is_our_script(filename: str) -> bool:\n if not os.path.exists(filename): # pragma: windows no cover (symlink)\n return False\n with open(filename) as f:\n contents = f.read()\n return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)\n\n\ndef shebang() -> str:\n if sys.platform == 'win32':\n py = 'python'\n else:\n # Homebrew/homebrew-core#35825: be more timid about appropriate `PATH`\n path_choices = [p for p in os.defpath.split(os.pathsep) if p]\n exe_choices = [\n f'python{sys.version_info[0]}.{sys.version_info[1]}',\n f'python{sys.version_info[0]}',\n ]\n for path, exe in itertools.product(path_choices, exe_choices):\n if os.access(os.path.join(path, exe), os.X_OK):\n py = exe\n break\n else:\n py = 'python'\n return f'#!/usr/bin/env {py}'\n\n\ndef _install_hook_script(\n config_file: str,\n hook_type: str,\n overwrite: bool = False,\n skip_on_missing_config: bool = False,\n git_dir: Optional[str] = None,\n) -> None:\n hook_path, legacy_path = _hook_paths(hook_type, git_dir=git_dir)\n\n os.makedirs(os.path.dirname(hook_path), exist_ok=True)\n\n # If we have an existing hook, move it to pre-commit.legacy\n if os.path.lexists(hook_path) and not is_our_script(hook_path):\n shutil.move(hook_path, legacy_path)\n\n # If we specify 
overwrite, we simply delete the legacy file\n if overwrite and os.path.exists(legacy_path):\n os.remove(legacy_path)\n elif os.path.exists(legacy_path):\n output.write_line(\n f'Running in migration mode with existing hooks at {legacy_path}\\n'\n f'Use -f to use only pre-commit.',\n )\n\n args = ['hook-impl', f'--config={config_file}', f'--hook-type={hook_type}']\n if skip_on_missing_config:\n args.append('--skip-on-missing-config')\n params = {'INSTALL_PYTHON': sys.executable, 'ARGS': args}\n\n with open(hook_path, 'w') as hook_file:\n contents = resource_text('hook-tmpl')\n before, rest = contents.split(TEMPLATE_START)\n to_template, after = rest.split(TEMPLATE_END)\n\n before = before.replace('#!/usr/bin/env python3', shebang())\n\n hook_file.write(before + TEMPLATE_START)\n for line in to_template.splitlines():\n var = line.split()[0]\n hook_file.write(f'{var} = {params[var]!r}\\n')\n hook_file.write(TEMPLATE_END + after)\n make_executable(hook_path)\n\n output.write_line(f'pre-commit installed at {hook_path}')\n\n\ndef install(\n config_file: str,\n store: Store,\n hook_types: Sequence[str],\n overwrite: bool = False,\n hooks: bool = False,\n skip_on_missing_config: bool = False,\n git_dir: Optional[str] = None,\n) -> int:\n if git.has_core_hookpaths_set():\n logger.error(\n 'Cowardly refusing to install hooks with `core.hooksPath` set.\\n'\n 'hint: `git config --unset-all core.hooksPath`',\n )\n return 1\n\n for hook_type in hook_types:\n _install_hook_script(\n config_file, hook_type,\n overwrite=overwrite,\n skip_on_missing_config=skip_on_missing_config,\n git_dir=git_dir,\n )\n\n if hooks:\n install_hooks(config_file, store)\n\n return 0\n\n\ndef install_hooks(config_file: str, store: Store) -> int:\n install_hook_envs(all_hooks(load_config(config_file), store), store)\n return 0\n\n\ndef _uninstall_hook_script(hook_type: str) -> None:\n hook_path, legacy_path = _hook_paths(hook_type)\n\n # If our file doesn't exist or it isn't ours, gtfo.\n if not os.path.exists(hook_path) or not is_our_script(hook_path):\n return\n\n os.remove(hook_path)\n output.write_line(f'{hook_type} uninstalled')\n\n if os.path.exists(legacy_path):\n os.rename(legacy_path, hook_path)\n output.write_line(f'Restored previous hooks to {hook_path}')\n\n\ndef uninstall(hook_types: Sequence[str]) -> int:\n for hook_type in hook_types:\n _uninstall_hook_script(hook_type)\n return 0\n", "path": "pre_commit/commands/install_uninstall.py"}], "after_files": [{"content": "import itertools\nimport logging\nimport os.path\nimport shutil\nimport sys\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.clientlib import load_config\nfrom pre_commit.repository import all_hooks\nfrom pre_commit.repository import install_hook_envs\nfrom pre_commit.store import Store\nfrom pre_commit.util import make_executable\nfrom pre_commit.util import resource_text\n\n\nlogger = logging.getLogger(__name__)\n\n# This is used to identify the hook file we install\nPRIOR_HASHES = (\n '4d9958c90bc262f47553e2c073f14cfe',\n 'd8ee923c46731b42cd95cc869add4062',\n '49fd668cb42069aa1b6048464be5d395',\n '79f09a650522a87b0da915d0d983b2de',\n 'e358c9dae00eac5d06b38dfdb1e33a8c',\n)\nCURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'\nTEMPLATE_START = '# start templated\\n'\nTEMPLATE_END = '# end templated\\n'\n\n\ndef _hook_paths(\n hook_type: str,\n git_dir: Optional[str] = None,\n) -> Tuple[str, str]:\n git_dir = git_dir if git_dir is not None 
else git.get_git_dir()\n pth = os.path.join(git_dir, 'hooks', hook_type)\n return pth, f'{pth}.legacy'\n\n\ndef is_our_script(filename: str) -> bool:\n if not os.path.exists(filename): # pragma: windows no cover (symlink)\n return False\n with open(filename) as f:\n contents = f.read()\n return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)\n\n\ndef shebang() -> str:\n if sys.platform == 'win32':\n py = 'python'\n else:\n # Homebrew/homebrew-core#35825: be more timid about appropriate `PATH`\n path_choices = [p for p in os.defpath.split(os.pathsep) if p]\n exe_choices = [\n f'python{sys.version_info[0]}.{sys.version_info[1]}',\n f'python{sys.version_info[0]}',\n ]\n for path, exe in itertools.product(path_choices, exe_choices):\n if os.access(os.path.join(path, exe), os.X_OK):\n py = exe\n break\n else:\n py = 'python'\n return f'#!/usr/bin/env {py}'\n\n\ndef _install_hook_script(\n config_file: str,\n hook_type: str,\n overwrite: bool = False,\n skip_on_missing_config: bool = False,\n git_dir: Optional[str] = None,\n) -> None:\n hook_path, legacy_path = _hook_paths(hook_type, git_dir=git_dir)\n\n os.makedirs(os.path.dirname(hook_path), exist_ok=True)\n\n # If we have an existing hook, move it to pre-commit.legacy\n if os.path.lexists(hook_path) and not is_our_script(hook_path):\n shutil.move(hook_path, legacy_path)\n\n # If we specify overwrite, we simply delete the legacy file\n if overwrite and os.path.exists(legacy_path):\n os.remove(legacy_path)\n elif os.path.exists(legacy_path):\n output.write_line(\n f'Running in migration mode with existing hooks at {legacy_path}\\n'\n f'Use -f to use only pre-commit.',\n )\n\n args = ['hook-impl', f'--config={config_file}', f'--hook-type={hook_type}']\n if skip_on_missing_config:\n args.append('--skip-on-missing-config')\n params = {'INSTALL_PYTHON': sys.executable, 'ARGS': args}\n\n with open(hook_path, 'w') as hook_file:\n contents = resource_text('hook-tmpl')\n before, rest = contents.split(TEMPLATE_START)\n to_template, after = rest.split(TEMPLATE_END)\n\n before = before.replace('#!/usr/bin/env python3', shebang())\n\n hook_file.write(before + TEMPLATE_START)\n for line in to_template.splitlines():\n var = line.split()[0]\n hook_file.write(f'{var} = {params[var]!r}\\n')\n hook_file.write(TEMPLATE_END + after)\n make_executable(hook_path)\n\n output.write_line(f'pre-commit installed at {hook_path}')\n\n\ndef install(\n config_file: str,\n store: Store,\n hook_types: Sequence[str],\n overwrite: bool = False,\n hooks: bool = False,\n skip_on_missing_config: bool = False,\n git_dir: Optional[str] = None,\n) -> int:\n if git_dir is None and git.has_core_hookpaths_set():\n logger.error(\n 'Cowardly refusing to install hooks with `core.hooksPath` set.\\n'\n 'hint: `git config --unset-all core.hooksPath`',\n )\n return 1\n\n for hook_type in hook_types:\n _install_hook_script(\n config_file, hook_type,\n overwrite=overwrite,\n skip_on_missing_config=skip_on_missing_config,\n git_dir=git_dir,\n )\n\n if hooks:\n install_hooks(config_file, store)\n\n return 0\n\n\ndef install_hooks(config_file: str, store: Store) -> int:\n install_hook_envs(all_hooks(load_config(config_file), store), store)\n return 0\n\n\ndef _uninstall_hook_script(hook_type: str) -> None:\n hook_path, legacy_path = _hook_paths(hook_type)\n\n # If our file doesn't exist or it isn't ours, gtfo.\n if not os.path.exists(hook_path) or not is_our_script(hook_path):\n return\n\n os.remove(hook_path)\n output.write_line(f'{hook_type} uninstalled')\n\n if 
os.path.exists(legacy_path):\n os.rename(legacy_path, hook_path)\n output.write_line(f'Restored previous hooks to {hook_path}')\n\n\ndef uninstall(hook_types: Sequence[str]) -> int:\n for hook_type in hook_types:\n _uninstall_hook_script(hook_type)\n return 0\n", "path": "pre_commit/commands/install_uninstall.py"}]} | 2,232 | 147 |
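For the `core.hooksPath` record that ends above, the golden diff narrows the refusal to the default-git-dir case, so that `pre-commit init-templatedir` (which passes an explicit `git_dir`) is no longer blocked. A small illustrative sketch of that guard; the standalone helper name here is hypothetical and only mirrors the condition from the diff:

```python
from typing import Optional

def should_refuse_install(git_dir: Optional[str], core_hookpaths_set: bool) -> bool:
    # Refuse only when installing into the repository's own git dir;
    # an explicit git_dir (e.g. a template dir) bypasses the core.hooksPath check.
    return git_dir is None and core_hookpaths_set
```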
gh_patches_debug_7165 | rasdani/github-patches | git_diff | freedomofpress__securedrop-2064 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release SecureDrop 0.4.1
This is a tracking issue for this week's release of SecureDrop 0.4.1, a bugfix release for #2049. Tasks may get added or modified.
# Prepare release
- [x] Create `release/0.4.1` branch - @redshiftzero
- [x] PR, test and merge the hotfix for #2049 in [this comment](https://github.com/freedomofpress/securedrop/issues/2049#issuecomment-318251010)
- [x] Bump version numbers and build test debian packages. I'm suggesting that we bump the version numbers even though the application code will be unchanged (the fix is in Ansible used in the Tails workstation environment) for consistency. i.e. if admins check out the SecureDrop blog or tags, they should be able to look at the "Powered by SecureDrop x.x" and not be confused about what happened.
# QA
- [x] Run through Tails 2 -> Tails 3 upgrade one last time - @redshiftzero
# Release
- [x] Release 0.4.1
- [x] Publish blog post about 0.4.1 bugfix - @redshiftzero
# Post-release
- [x] Merge release changes into master branch - @redshiftzero
- [x] Merge release changes into development branch - @redshiftzero
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/version.py`
Content:
```
1 __version__ = '0.4'
2
```
Path: `docs/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # SecureDrop documentation build configuration file, created by
4 # sphinx-quickstart on Tue Oct 13 12:08:52 2015.
5 #
6 # This file is execfile()d with the current directory set to its
7 # containing dir.
8 #
9 # Note that not all possible configuration values are present in this
10 # autogenerated file.
11 #
12 # All configuration values have a default; values that are commented out
13 # serve to show the default.
14
15 import sys
16 import os
17 import shlex
18
19 # Detect if we're being built by Read the Docs
20 # https://docs.readthedocs.org/en/latest/faq.html#how-do-i-change-behavior-for-read-the-docs
21 on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
22
23 # If extensions (or modules to document with autodoc) are in another directory,
24 # add these directories to sys.path here. If the directory is relative to the
25 # documentation root, use os.path.abspath to make it absolute, like shown here.
26 #sys.path.insert(0, os.path.abspath('.'))
27
28 # -- General configuration ------------------------------------------------
29
30 # If your documentation needs a minimal Sphinx version, state it here.
31 #needs_sphinx = '1.0'
32
33 # Add any Sphinx extension module names here, as strings. They can be
34 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
35 # ones.
36 extensions = ['sphinx.ext.todo', ]
37
38 # Add any paths that contain templates here, relative to this directory.
39 templates_path = ['_templates']
40
41 # The suffix(es) of source filenames.
42 # You can specify multiple suffix as a list of string:
43 # source_suffix = ['.rst', '.md']
44 source_suffix = '.rst'
45
46 # The encoding of source files.
47 #source_encoding = 'utf-8-sig'
48
49 # The master toctree document.
50 master_doc = 'index'
51
52 # General information about the project.
53 project = u'SecureDrop'
54 copyright = u'2017, Freedom of the Press Foundation'
55 author = u'SecureDrop Team and Contributors'
56
57 # The version info for the project you're documenting, acts as replacement for
58 # |version| and |release|, also used in various other places throughout the
59 # built documents.
60 #
61 # The short X.Y version.
62 version = '0.4'
63 # The full version, including alpha/beta/rc tags.
64 release = '0.4'
65
66 # The language for content autogenerated by Sphinx. Refer to documentation
67 # for a list of supported languages.
68 #
69 # This is also used if you do content translation via gettext catalogs.
70 # Usually you set "language" from the command line for these cases.
71 language = None
72
73 # There are two options for replacing |today|: either, you set today to some
74 # non-false value, then it is used:
75 #today = ''
76 # Else, today_fmt is used as the format for a strftime call.
77 #today_fmt = '%B %d, %Y'
78
79 # List of patterns, relative to source directory, that match files and
80 # directories to ignore when looking for source files.
81 exclude_patterns = ['_build']
82
83 # The reST default role (used for this markup: `text`) to use for all
84 # documents.
85 #default_role = None
86
87 # If true, '()' will be appended to :func: etc. cross-reference text.
88 #add_function_parentheses = True
89
90 # If true, the current module name will be prepended to all description
91 # unit titles (such as .. function::).
92 #add_module_names = True
93
94 # If true, sectionauthor and moduleauthor directives will be shown in the
95 # output. They are ignored by default.
96 #show_authors = False
97
98 # The name of the Pygments (syntax highlighting) style to use.
99 pygments_style = 'sphinx'
100
101 # A list of ignored prefixes for module index sorting.
102 #modindex_common_prefix = []
103
104 # If true, keep warnings as "system message" paragraphs in the built documents.
105 #keep_warnings = False
106
107 # If true, `todo` and `todoList` produce output, else they produce nothing.
108 todo_include_todos = False
109
110
111 # -- Options for HTML output ----------------------------------------------
112
113 # The theme to use for HTML and HTML Help pages. See the documentation for
114 # a list of builtin themes.
115 if on_rtd:
116 html_theme = 'default'
117 else:
118 try:
119 # If you want to build the docs locally using the RTD theme,
120 # you may need to install it: ``pip install sphinx_rtd_theme``.
121 # https://github.com/snide/sphinx_rtd_theme#via-package
122 import sphinx_rtd_theme
123 html_theme = "sphinx_rtd_theme"
124 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
125 except ImportError:
126 # This theme is included with Sphinx and is quite nice (based
127 # on the Pocoo themes), but since we're using the RTD theme
128 # for the production docs, it's best to use that to avoid
129 # issues due to discrepancies between the themes.
130 html_theme = 'alabaster'
131
132 # Theme options are theme-specific and customize the look and feel of a theme
133 # further. For a list of options available for each theme, see the
134 # documentation.
135 #html_theme_options = {}
136
137 # Add any paths that contain custom themes here, relative to this directory.
138 #html_theme_path = []
139
140 # The name for this set of Sphinx documents. If None, it defaults to
141 # "<project> v<release> documentation".
142 #html_title = None
143
144 # A shorter title for the navigation bar. Default is the same as html_title.
145 #html_short_title = None
146
147 # The name of an image file (relative to this directory) to place at the top
148 # of the sidebar.
149 html_logo = '../securedrop/static/i/favicon.png'
150
151 # The name of an image file (within the static path) to use as favicon of the
152 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
153 # pixels large.
154 #html_favicon = None
155
156 # Add any paths that contain custom static files (such as style sheets) here,
157 # relative to this directory. They are copied after the builtin static files,
158 # so a file named "default.css" will overwrite the builtin "default.css".
159 # html_static_path = ['_static']
160
161 # Add any extra paths that contain custom files (such as robots.txt or
162 # .htaccess) here, relative to this directory. These files are copied
163 # directly to the root of the documentation.
164 #html_extra_path = []
165
166 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
167 # using the given strftime format.
168 #html_last_updated_fmt = '%b %d, %Y'
169
170 # If true, SmartyPants will be used to convert quotes and dashes to
171 # typographically correct entities.
172 #html_use_smartypants = True
173
174 # Custom sidebar templates, maps document names to template names.
175 #html_sidebars = {}
176
177 # Additional templates that should be rendered to pages, maps page names to
178 # template names.
179 #html_additional_pages = {}
180
181 # If false, no module index is generated.
182 #html_domain_indices = True
183
184 # If false, no index is generated.
185 #html_use_index = True
186
187 # If true, the index is split into individual pages for each letter.
188 #html_split_index = False
189
190 # If true, links to the reST sources are added to the pages.
191 #html_show_sourcelink = True
192
193 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
194 #html_show_sphinx = True
195
196 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
197 #html_show_copyright = True
198
199 # If true, an OpenSearch description file will be output, and all pages will
200 # contain a <link> tag referring to it. The value of this option must be the
201 # base URL from which the finished HTML is served.
202 #html_use_opensearch = ''
203
204 # This is the file name suffix for HTML files (e.g. ".xhtml").
205 #html_file_suffix = None
206
207 # Language to be used for generating the HTML full-text search index.
208 # Sphinx supports the following languages:
209 # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
210 # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
211 #html_search_language = 'en'
212
213 # A dictionary with options for the search language support, empty by default.
214 # Now only 'ja' uses this config value
215 #html_search_options = {'type': 'default'}
216
217 # The name of a javascript file (relative to the configuration directory) that
218 # implements a search results scorer. If empty, the default will be used.
219 #html_search_scorer = 'scorer.js'
220
221 # Output file base name for HTML help builder.
222 htmlhelp_basename = 'SecureDropdoc'
223
224 # -- Options for LaTeX output ---------------------------------------------
225
226 latex_elements = {
227 # The paper size ('letterpaper' or 'a4paper').
228 #'papersize': 'letterpaper',
229
230 # The font size ('10pt', '11pt' or '12pt').
231 #'pointsize': '10pt',
232
233 # Additional stuff for the LaTeX preamble.
234 #'preamble': '',
235
236 # Latex figure (float) alignment
237 #'figure_align': 'htbp',
238 }
239
240 # Grouping the document tree into LaTeX files. List of tuples
241 # (source start file, target name, title,
242 # author, documentclass [howto, manual, or own class]).
243 latex_documents = [
244 (master_doc, 'SecureDrop.tex', u'SecureDrop Documentation',
245 author, 'manual'),
246 ]
247
248 # The name of an image file (relative to this directory) to place at the top of
249 # the title page.
250 #latex_logo = None
251
252 # For "manual" documents, if this is true, then toplevel headings are parts,
253 # not chapters.
254 #latex_use_parts = False
255
256 # If true, show page references after internal links.
257 #latex_show_pagerefs = False
258
259 # If true, show URL addresses after external links.
260 #latex_show_urls = False
261
262 # Documents to append as an appendix to all manuals.
263 #latex_appendices = []
264
265 # If false, no module index is generated.
266 #latex_domain_indices = True
267
268
269 # -- Options for manual page output ---------------------------------------
270
271 # One entry per manual page. List of tuples
272 # (source start file, name, description, authors, manual section).
273 man_pages = [
274 (master_doc, 'securedrop', u'SecureDrop Documentation',
275 [author], 1)
276 ]
277
278 # If true, show URL addresses after external links.
279 #man_show_urls = False
280
281
282 # -- Options for Texinfo output -------------------------------------------
283
284 # Grouping the document tree into Texinfo files. List of tuples
285 # (source start file, target name, title, author,
286 # dir menu entry, description, category)
287 texinfo_documents = [
288 (master_doc, 'SecureDrop', u'SecureDrop Documentation',
289 author, 'SecureDrop', 'One line description of project.',
290 'Miscellaneous'),
291 ]
292
293 # Documents to append as an appendix to all manuals.
294 #texinfo_appendices = []
295
296 # If false, no module index is generated.
297 #texinfo_domain_indices = True
298
299 # How to display URL addresses: 'footnote', 'no', or 'inline'.
300 #texinfo_show_urls = 'footnote'
301
302 # If true, do not generate a @detailmenu in the "Top" node's menu.
303 #texinfo_no_detailmenu = False
304
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -59,9 +59,9 @@
# built documents.
#
# The short X.Y version.
-version = '0.4'
+version = '0.4.1'
# The full version, including alpha/beta/rc tags.
-release = '0.4'
+release = '0.4.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
diff --git a/securedrop/version.py b/securedrop/version.py
--- a/securedrop/version.py
+++ b/securedrop/version.py
@@ -1 +1 @@
-__version__ = '0.4'
+__version__ = '0.4.1'
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -59,9 +59,9 @@\n # built documents.\n #\n # The short X.Y version.\n-version = '0.4'\n+version = '0.4.1'\n # The full version, including alpha/beta/rc tags.\n-release = '0.4'\n+release = '0.4.1'\n \n # The language for content autogenerated by Sphinx. Refer to documentation\n # for a list of supported languages.\ndiff --git a/securedrop/version.py b/securedrop/version.py\n--- a/securedrop/version.py\n+++ b/securedrop/version.py\n@@ -1 +1 @@\n-__version__ = '0.4'\n+__version__ = '0.4.1'\n", "issue": "Release SecureDrop 0.4.1\nThis is a tracking issue for this week's release of SecureDrop 0.4.1, a bugfix release for #2049. Tasks may get added or modified. \r\n\r\n# Prepare release\r\n- [x] Create `release/0.4.1` branch - @redshiftzero\r\n- [x] PR, test and merge the hotfix for #2049 in [this comment](https://github.com/freedomofpress/securedrop/issues/2049#issuecomment-318251010)\r\n- [x] Bump version numbers and build test debian packages. I'm suggesting that we bump the version numbers even though the application code will be unchanged (the fix is in Ansible used in the Tails workstation environment) for consistency. i.e. if admins check out the SecureDrop blog or tags, they should be able to look at the \"Powered by SecureDrop x.x\" and not be confused about what happened. \r\n\r\n# QA\r\n \r\n- [x] Run through Tails 2 -> Tails 3 upgrade one last time - @redshiftzero\r\n\r\n# Release\r\n\r\n- [x] Release 0.4.1\r\n- [x] Publish blog post about 0.4.1 bugfix - @redshiftzero\r\n\r\n# Post-release\r\n\r\n- [x] Merge release changes into master branch - @redshiftzero\r\n- [x] Merge release changes into development branch - @redshiftzero\n", "before_files": [{"content": "__version__ = '0.4'\n", "path": "securedrop/version.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# SecureDrop documentation build configuration file, created by\n# sphinx-quickstart on Tue Oct 13 12:08:52 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\nimport shlex\n\n# Detect if we're being built by Read the Docs\n# https://docs.readthedocs.org/en/latest/faq.html#how-do-i-change-behavior-for-read-the-docs\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.todo', ]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'SecureDrop'\ncopyright = u'2017, Freedom of the Press Foundation'\nauthor = u'SecureDrop Team and Contributors'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '0.4'\n# The full version, including alpha/beta/rc tags.\nrelease = '0.4'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nif on_rtd:\n html_theme = 'default'\nelse:\n try:\n # If you want to build the docs locally using the RTD theme,\n # you may need to install it: ``pip install sphinx_rtd_theme``.\n # https://github.com/snide/sphinx_rtd_theme#via-package\n import sphinx_rtd_theme\n html_theme = \"sphinx_rtd_theme\"\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n except ImportError:\n # This theme is included with Sphinx and is quite nice (based\n # on the Pocoo themes), but since we're using the RTD theme\n # for the production docs, it's best to use that to avoid\n # issues due to discrepancies between the themes.\n html_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = '../securedrop/static/i/favicon.png'\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n#html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n#html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. 
If empty, the default will be used.\n#html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'SecureDropdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n\n# Latex figure (float) alignment\n#'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'SecureDrop.tex', u'SecureDrop Documentation',\n author, 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'securedrop', u'SecureDrop Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'SecureDrop', u'SecureDrop Documentation',\n author, 'SecureDrop', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n", "path": "docs/conf.py"}], "after_files": [{"content": "__version__ = '0.4.1'\n", "path": "securedrop/version.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# SecureDrop documentation build configuration file, created by\n# sphinx-quickstart on Tue Oct 13 12:08:52 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\nimport shlex\n\n# Detect if we're being built by Read the Docs\n# https://docs.readthedocs.org/en/latest/faq.html#how-do-i-change-behavior-for-read-the-docs\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.todo', ]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'SecureDrop'\ncopyright = u'2017, Freedom of the Press Foundation'\nauthor = u'SecureDrop Team and Contributors'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '0.4.1'\n# The full version, including alpha/beta/rc tags.\nrelease = '0.4.1'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\nif on_rtd:\n html_theme = 'default'\nelse:\n try:\n # If you want to build the docs locally using the RTD theme,\n # you may need to install it: ``pip install sphinx_rtd_theme``.\n # https://github.com/snide/sphinx_rtd_theme#via-package\n import sphinx_rtd_theme\n html_theme = \"sphinx_rtd_theme\"\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n except ImportError:\n # This theme is included with Sphinx and is quite nice (based\n # on the Pocoo themes), but since we're using the RTD theme\n # for the production docs, it's best to use that to avoid\n # issues due to discrepancies between the themes.\n html_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = '../securedrop/static/i/favicon.png'\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. 
\".xhtml\").\n#html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n#html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n#html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'SecureDropdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n\n# Latex figure (float) alignment\n#'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'SecureDrop.tex', u'SecureDrop Documentation',\n author, 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'securedrop', u'SecureDrop Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'SecureDrop', u'SecureDrop Documentation',\n author, 'SecureDrop', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n", "path": "docs/conf.py"}]} | 3,961 | 176 |
gh_patches_debug_16746 | rasdani/github-patches | git_diff | scikit-image__scikit-image-1927 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DOC: Reduce size image in plot_inpaint
In reference to #1920
- Reduce the size of the image in doc/examples/filters/plot_inpaint.py to show more clearly the result of the algorithm.
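One way to do this, matching the 200×200 crop used in the patch further below (the astronaut image is 512×512, so the defect masks also have to be shifted to stay inside the smaller frame):

```python
import numpy as np
from skimage import data

image_orig = data.astronaut()[0:200, 0:200]  # 200x200 crop instead of the full image

mask = np.zeros(image_orig.shape[:-1])
mask[20:60, 0:20] = 1       # left defect (unchanged)
mask[160:180, 70:155] = 1   # middle defect, moved inside the crop
mask[30:60, 170:195] = 1    # right defect, moved inside the crop
```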
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/examples/filters/plot_inpaint.py`
Content:
```
1 """
2 ===========
3 Inpainting
4 ===========
5 Inpainting [1]_ is the process of reconstructing lost or deteriorated
6 parts of images and videos.
7
8 The reconstruction is supposed to be performed in fully automatic way by
9 exploiting the information presented in non-damaged regions.
10
11 In this example, we show how the masked pixels get inpainted by
12 inpainting algorithm based on 'biharmonic equation'-assumption [2]_ [3]_.
13
14 .. [1] Wikipedia. Inpainting
15 https://en.wikipedia.org/wiki/Inpainting
16 .. [2] Wikipedia. Biharmonic equation
17 https://en.wikipedia.org/wiki/Biharmonic_equation
18 .. [3] N.S.Hoang, S.B.Damelin, "On surface completion and image
19 inpainting by biharmonic functions: numerical aspects",
20 http://www.ima.umn.edu/~damelin/biharmonic
21 """
22
23 import numpy as np
24 import matplotlib.pyplot as plt
25
26 from skimage import data, color
27 from skimage.restoration import inpaint
28
29 image_orig = data.astronaut()
30
31 # Create mask with three defect regions: left, middle, right respectively
32 mask = np.zeros(image_orig.shape[:-1])
33 mask[20:60, 0:20] = 1
34 mask[200:300, 150:170] = 1
35 mask[50:100, 400:430] = 1
36
37 # Defect image over the same region in each color channel
38 image_defect = image_orig.copy()
39 for layer in range(image_defect.shape[-1]):
40 image_defect[np.where(mask)] = 0
41
42 image_result = inpaint.inpaint_biharmonic(image_defect, mask, multichannel=True)
43
44 fig, axes = plt.subplots(ncols=2, nrows=2)
45 ax0, ax1, ax2, ax3 = axes.ravel()
46
47 ax0.set_title('Original image')
48 ax0.imshow(image_orig)
49 ax0.axis('off')
50
51 ax1.set_title('Mask')
52 ax1.imshow(mask, cmap=plt.cm.gray)
53 ax1.axis('off')
54
55 ax2.set_title('Defected image')
56 ax2.imshow(image_defect)
57 ax2.axis('off')
58
59 ax3.set_title('Inpainted image')
60 ax3.imshow(image_result)
61 ax3.axis('off')
62
63 plt.tight_layout()
64 plt.show()
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/doc/examples/filters/plot_inpaint.py b/doc/examples/filters/plot_inpaint.py
--- a/doc/examples/filters/plot_inpaint.py
+++ b/doc/examples/filters/plot_inpaint.py
@@ -26,13 +26,13 @@
from skimage import data, color
from skimage.restoration import inpaint
-image_orig = data.astronaut()
+image_orig = data.astronaut()[0:200, 0:200]
# Create mask with three defect regions: left, middle, right respectively
mask = np.zeros(image_orig.shape[:-1])
mask[20:60, 0:20] = 1
-mask[200:300, 150:170] = 1
-mask[50:100, 400:430] = 1
+mask[160:180, 70:155] = 1
+mask[30:60, 170:195] = 1
# Defect image over the same region in each color channel
image_defect = image_orig.copy()
@@ -60,5 +60,5 @@
ax3.imshow(image_result)
ax3.axis('off')
-plt.tight_layout()
+fig.tight_layout()
plt.show()
| {"golden_diff": "diff --git a/doc/examples/filters/plot_inpaint.py b/doc/examples/filters/plot_inpaint.py\n--- a/doc/examples/filters/plot_inpaint.py\n+++ b/doc/examples/filters/plot_inpaint.py\n@@ -26,13 +26,13 @@\n from skimage import data, color\n from skimage.restoration import inpaint\n \n-image_orig = data.astronaut()\n+image_orig = data.astronaut()[0:200, 0:200]\n \n # Create mask with three defect regions: left, middle, right respectively\n mask = np.zeros(image_orig.shape[:-1])\n mask[20:60, 0:20] = 1\n-mask[200:300, 150:170] = 1\n-mask[50:100, 400:430] = 1\n+mask[160:180, 70:155] = 1\n+mask[30:60, 170:195] = 1\n \n # Defect image over the same region in each color channel\n image_defect = image_orig.copy()\n@@ -60,5 +60,5 @@\n ax3.imshow(image_result)\n ax3.axis('off')\n \n-plt.tight_layout()\n+fig.tight_layout()\n plt.show()\n", "issue": "DOC: Reduce size image in plot_inpaint\nIn reference to #1920 \n- Reduce the size of the image in doc/examples/filters/plot_inpaint.py to show more clearly the result of the algorithm.\n\n", "before_files": [{"content": "\"\"\"\n===========\nInpainting\n===========\nInpainting [1]_ is the process of reconstructing lost or deteriorated\nparts of images and videos.\n\nThe reconstruction is supposed to be performed in fully automatic way by\nexploiting the information presented in non-damaged regions.\n\nIn this example, we show how the masked pixels get inpainted by\ninpainting algorithm based on 'biharmonic equation'-assumption [2]_ [3]_.\n\n.. [1] Wikipedia. Inpainting\n https://en.wikipedia.org/wiki/Inpainting\n.. [2] Wikipedia. Biharmonic equation\n https://en.wikipedia.org/wiki/Biharmonic_equation\n.. [3] N.S.Hoang, S.B.Damelin, \"On surface completion and image\n inpainting by biharmonic functions: numerical aspects\",\n http://www.ima.umn.edu/~damelin/biharmonic\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom skimage import data, color\nfrom skimage.restoration import inpaint\n\nimage_orig = data.astronaut()\n\n# Create mask with three defect regions: left, middle, right respectively\nmask = np.zeros(image_orig.shape[:-1])\nmask[20:60, 0:20] = 1\nmask[200:300, 150:170] = 1\nmask[50:100, 400:430] = 1\n\n# Defect image over the same region in each color channel\nimage_defect = image_orig.copy()\nfor layer in range(image_defect.shape[-1]):\n image_defect[np.where(mask)] = 0\n\nimage_result = inpaint.inpaint_biharmonic(image_defect, mask, multichannel=True)\n\nfig, axes = plt.subplots(ncols=2, nrows=2)\nax0, ax1, ax2, ax3 = axes.ravel()\n\nax0.set_title('Original image')\nax0.imshow(image_orig)\nax0.axis('off')\n\nax1.set_title('Mask')\nax1.imshow(mask, cmap=plt.cm.gray)\nax1.axis('off')\n\nax2.set_title('Defected image')\nax2.imshow(image_defect)\nax2.axis('off')\n\nax3.set_title('Inpainted image')\nax3.imshow(image_result)\nax3.axis('off')\n\nplt.tight_layout()\nplt.show()\n", "path": "doc/examples/filters/plot_inpaint.py"}], "after_files": [{"content": "\"\"\"\n===========\nInpainting\n===========\nInpainting [1]_ is the process of reconstructing lost or deteriorated\nparts of images and videos.\n\nThe reconstruction is supposed to be performed in fully automatic way by\nexploiting the information presented in non-damaged regions.\n\nIn this example, we show how the masked pixels get inpainted by\ninpainting algorithm based on 'biharmonic equation'-assumption [2]_ [3]_.\n\n.. [1] Wikipedia. Inpainting\n https://en.wikipedia.org/wiki/Inpainting\n.. [2] Wikipedia. 
Biharmonic equation\n https://en.wikipedia.org/wiki/Biharmonic_equation\n.. [3] N.S.Hoang, S.B.Damelin, \"On surface completion and image\n inpainting by biharmonic functions: numerical aspects\",\n http://www.ima.umn.edu/~damelin/biharmonic\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom skimage import data, color\nfrom skimage.restoration import inpaint\n\nimage_orig = data.astronaut()[0:200, 0:200]\n\n# Create mask with three defect regions: left, middle, right respectively\nmask = np.zeros(image_orig.shape[:-1])\nmask[20:60, 0:20] = 1\nmask[160:180, 70:155] = 1\nmask[30:60, 170:195] = 1\n\n# Defect image over the same region in each color channel\nimage_defect = image_orig.copy()\nfor layer in range(image_defect.shape[-1]):\n image_defect[np.where(mask)] = 0\n\nimage_result = inpaint.inpaint_biharmonic(image_defect, mask, multichannel=True)\n\nfig, axes = plt.subplots(ncols=2, nrows=2)\nax0, ax1, ax2, ax3 = axes.ravel()\n\nax0.set_title('Original image')\nax0.imshow(image_orig)\nax0.axis('off')\n\nax1.set_title('Mask')\nax1.imshow(mask, cmap=plt.cm.gray)\nax1.axis('off')\n\nax2.set_title('Defected image')\nax2.imshow(image_defect)\nax2.axis('off')\n\nax3.set_title('Inpainted image')\nax3.imshow(image_result)\nax3.axis('off')\n\nfig.tight_layout()\nplt.show()\n", "path": "doc/examples/filters/plot_inpaint.py"}]} | 950 | 300 |
gh_patches_debug_32032 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-376 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[metrics] AUROC Metric can't handle 0 observations of a class with multiclass classifier
I'm attempting to calculate AUROC for a multiclass problem where some classes are very rare, occasionally never seen, and I'm getting the following error: `raise ValueError("No positive samples in targets, true positive value should be meaningless")`
In the case of 0 observations, I feel the `average='weighted'` should work, since the contribution to the final AUROC should be 0 regardless. One can think of other scenarios where there is a very high number of classes, some of which will happen to not be seen in some dataset.
_Originally posted by @BeyondTheProof in https://github.com/PyTorchLightning/pytorch-lightning/issues/2210#issuecomment-872440776_
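A minimal sketch that should reproduce the reported failure, assuming the functional `auroc` signature shown in the file below (the synthetic tensors are purely illustrative; the key point is that class index 3 never appears in `target`):

```python
import torch
from torchmetrics.functional import auroc

preds = torch.softmax(torch.randn(16, 4), dim=1)  # probabilities for 4 classes
target = torch.randint(0, 3, (16,))               # class 3 is never observed

# Per the report, this raises:
# ValueError("No positive samples in targets, true positive value should be meaningless")
auroc(preds, target, num_classes=4, average='weighted')
```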
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchmetrics/functional/classification/auroc.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Optional, Sequence, Tuple
15
16 import torch
17 from torch import Tensor, tensor
18
19 from torchmetrics.functional.classification.auc import _auc_compute_without_check
20 from torchmetrics.functional.classification.roc import roc
21 from torchmetrics.utilities.checks import _input_format_classification
22 from torchmetrics.utilities.enums import AverageMethod, DataType
23 from torchmetrics.utilities.imports import _TORCH_LOWER_1_6
24
25
26 def _auroc_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor, DataType]:
27 # use _input_format_classification for validating the input and get the mode of data
28 _, _, mode = _input_format_classification(preds, target)
29
30 if mode == 'multi class multi dim':
31 n_classes = preds.shape[1]
32 preds = preds.transpose(0, 1).reshape(n_classes, -1).transpose(0, 1)
33 target = target.flatten()
34 if mode == 'multi-label' and preds.ndim > 2:
35 n_classes = preds.shape[1]
36 preds = preds.transpose(0, 1).reshape(n_classes, -1).transpose(0, 1)
37 target = target.transpose(0, 1).reshape(n_classes, -1).transpose(0, 1)
38
39 return preds, target, mode
40
41
42 def _auroc_compute(
43 preds: Tensor,
44 target: Tensor,
45 mode: DataType,
46 num_classes: Optional[int] = None,
47 pos_label: Optional[int] = None,
48 average: Optional[str] = 'macro',
49 max_fpr: Optional[float] = None,
50 sample_weights: Optional[Sequence] = None,
51 ) -> Tensor:
52 # binary mode override num_classes
53 if mode == DataType.BINARY:
54 num_classes = 1
55
56 # check max_fpr parameter
57 if max_fpr is not None:
58 if not isinstance(max_fpr, float) and 0 < max_fpr <= 1:
59 raise ValueError(f"`max_fpr` should be a float in range (0, 1], got: {max_fpr}")
60
61 if _TORCH_LOWER_1_6:
62 raise RuntimeError(
63 "`max_fpr` argument requires `torch.bucketize` which"
64 " is not available below PyTorch version 1.6"
65 )
66
67 # max_fpr parameter is only support for binary
68 if mode != DataType.BINARY:
69 raise ValueError(
70 f"Partial AUC computation not available in"
71 f" multilabel/multiclass setting, 'max_fpr' must be"
72 f" set to `None`, received `{max_fpr}`."
73 )
74
75 # calculate fpr, tpr
76 if mode == DataType.MULTILABEL:
77 if average == AverageMethod.MICRO:
78 fpr, tpr, _ = roc(preds.flatten(), target.flatten(), 1, pos_label, sample_weights)
79 elif num_classes:
80 # for multilabel we iteratively evaluate roc in a binary fashion
81 output = [
82 roc(preds[:, i], target[:, i], num_classes=1, pos_label=1, sample_weights=sample_weights)
83 for i in range(num_classes)
84 ]
85 fpr = [o[0] for o in output]
86 tpr = [o[1] for o in output]
87 else:
88 raise ValueError('Detected input to be `multilabel` but you did not provide `num_classes` argument')
89 else:
90 if mode != DataType.BINARY and num_classes is None:
91 raise ValueError('Detected input to `multiclass` but you did not provide `num_classes` argument')
92 fpr, tpr, _ = roc(preds, target, num_classes, pos_label, sample_weights)
93
94 # calculate standard roc auc score
95 if max_fpr is None or max_fpr == 1:
96 if mode == DataType.MULTILABEL and average == AverageMethod.MICRO:
97 pass
98 elif num_classes != 1:
99 # calculate auc scores per class
100 auc_scores = [_auc_compute_without_check(x, y, 1.0) for x, y in zip(fpr, tpr)]
101
102 # calculate average
103 if average == AverageMethod.NONE:
104 return tensor(auc_scores)
105 if average == AverageMethod.MACRO:
106 return torch.mean(torch.stack(auc_scores))
107 if average == AverageMethod.WEIGHTED:
108 if mode == DataType.MULTILABEL:
109 support = torch.sum(target, dim=0)
110 else:
111 support = torch.bincount(target.flatten(), minlength=num_classes)
112 return torch.sum(torch.stack(auc_scores) * support / support.sum())
113
114 allowed_average = (AverageMethod.NONE.value, AverageMethod.MACRO.value, AverageMethod.WEIGHTED.value)
115 raise ValueError(
116 f"Argument `average` expected to be one of the following:"
117 f" {allowed_average} but got {average}"
118 )
119
120 return _auc_compute_without_check(fpr, tpr, 1.0)
121
122 _device = fpr.device if isinstance(fpr, Tensor) else fpr[0].device
123 max_area: Tensor = tensor(max_fpr, device=_device)
124 # Add a single point at max_fpr and interpolate its tpr value
125 stop = torch.bucketize(max_area, fpr, out_int32=True, right=True)
126 weight = (max_area - fpr[stop - 1]) / (fpr[stop] - fpr[stop - 1])
127 interp_tpr: Tensor = torch.lerp(tpr[stop - 1], tpr[stop], weight)
128 tpr = torch.cat([tpr[:stop], interp_tpr.view(1)])
129 fpr = torch.cat([fpr[:stop], max_area.view(1)])
130
131 # Compute partial AUC
132 partial_auc = _auc_compute_without_check(fpr, tpr, 1.0)
133
134 # McClish correction: standardize result to be 0.5 if non-discriminant and 1 if maximal
135 min_area: Tensor = 0.5 * max_area**2
136 return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area))
137
138
139 def auroc(
140 preds: Tensor,
141 target: Tensor,
142 num_classes: Optional[int] = None,
143 pos_label: Optional[int] = None,
144 average: Optional[str] = 'macro',
145 max_fpr: Optional[float] = None,
146 sample_weights: Optional[Sequence] = None,
147 ) -> Tensor:
148 """ Compute `Area Under the Receiver Operating Characteristic Curve (ROC AUC)
149 <https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Further_interpretations>`_
150
151 Args:
152 preds: predictions from model (logits or probabilities)
153 target: Ground truth labels
154 num_classes: integer with number of classes for multi-label and multiclass problems.
155 Should be set to ``None`` for binary problems
156 pos_label: integer determining the positive class. Default is ``None``
157 which for binary problem is translate to 1. For multiclass problems
158 this argument should not be set as we iteratively change it in the
159 range [0,num_classes-1]
160 average:
161 - ``'micro'`` computes metric globally. Only works for multilabel problems
162 - ``'macro'`` computes metric for each class and uniformly averages them
163 - ``'weighted'`` computes metric for each class and does a weighted-average,
164 where each class is weighted by their support (accounts for class imbalance)
165 - ``None`` computes and returns the metric per class
166 max_fpr:
167 If not ``None``, calculates standardized partial AUC over the
168 range [0, max_fpr]. Should be a float between 0 and 1.
169 sample_weights: sample weights for each data point
170
171 Raises:
172 ValueError:
173 If ``max_fpr`` is not a ``float`` in the range ``(0, 1]``.
174 RuntimeError:
175 If ``PyTorch version`` is ``below 1.6`` since max_fpr requires `torch.bucketize`
176 which is not available below 1.6.
177 ValueError:
178 If ``max_fpr`` is not set to ``None`` and the mode is ``not binary``
179 since partial AUC computation is not available in multilabel/multiclass.
180 ValueError:
181 If ``average`` is none of ``None``, ``"macro"`` or ``"weighted"``.
182
183 Example (binary case):
184 >>> from torchmetrics.functional import auroc
185 >>> preds = torch.tensor([0.13, 0.26, 0.08, 0.19, 0.34])
186 >>> target = torch.tensor([0, 0, 1, 1, 1])
187 >>> auroc(preds, target, pos_label=1)
188 tensor(0.5000)
189
190 Example (multiclass case):
191 >>> preds = torch.tensor([[0.90, 0.05, 0.05],
192 ... [0.05, 0.90, 0.05],
193 ... [0.05, 0.05, 0.90],
194 ... [0.85, 0.05, 0.10],
195 ... [0.10, 0.10, 0.80]])
196 >>> target = torch.tensor([0, 1, 1, 2, 2])
197 >>> auroc(preds, target, num_classes=3)
198 tensor(0.7778)
199 """
200 preds, target, mode = _auroc_update(preds, target)
201 return _auroc_compute(preds, target, mode, num_classes, pos_label, average, max_fpr, sample_weights)
202
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchmetrics/functional/classification/auroc.py b/torchmetrics/functional/classification/auroc.py
--- a/torchmetrics/functional/classification/auroc.py
+++ b/torchmetrics/functional/classification/auroc.py
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import warnings
from typing import Optional, Sequence, Tuple
import torch
@@ -87,8 +88,23 @@
else:
raise ValueError('Detected input to be `multilabel` but you did not provide `num_classes` argument')
else:
- if mode != DataType.BINARY and num_classes is None:
- raise ValueError('Detected input to `multiclass` but you did not provide `num_classes` argument')
+ if mode != DataType.BINARY:
+ if num_classes is None:
+ raise ValueError("Detected input to `multiclass` but you did not provide `num_classes` argument")
+ if average == AverageMethod.WEIGHTED and len(torch.unique(target)) < num_classes:
+ # If one or more classes has 0 observations, we should exclude them, as its weight will be 0
+ target_bool_mat = torch.zeros((len(target), num_classes), dtype=bool)
+ target_bool_mat[torch.arange(len(target)), target.long()] = 1
+ class_observed = target_bool_mat.sum(axis=0) > 0
+ for c in range(num_classes):
+ if not class_observed[c]:
+ warnings.warn(f'Class {c} had 0 observations, omitted from AUROC calculation', UserWarning)
+ preds = preds[:, class_observed]
+ target = target_bool_mat[:, class_observed]
+ target = torch.where(target)[1]
+ num_classes = class_observed.sum()
+ if num_classes == 1:
+ raise ValueError('Found 1 non-empty class in `multiclass` AUROC calculation')
fpr, tpr, _ = roc(preds, target, num_classes, pos_label, sample_weights)
# calculate standard roc auc score
| {"golden_diff": "diff --git a/torchmetrics/functional/classification/auroc.py b/torchmetrics/functional/classification/auroc.py\n--- a/torchmetrics/functional/classification/auroc.py\n+++ b/torchmetrics/functional/classification/auroc.py\n@@ -11,6 +11,7 @@\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n+import warnings\n from typing import Optional, Sequence, Tuple\n \n import torch\n@@ -87,8 +88,23 @@\n else:\n raise ValueError('Detected input to be `multilabel` but you did not provide `num_classes` argument')\n else:\n- if mode != DataType.BINARY and num_classes is None:\n- raise ValueError('Detected input to `multiclass` but you did not provide `num_classes` argument')\n+ if mode != DataType.BINARY:\n+ if num_classes is None:\n+ raise ValueError(\"Detected input to `multiclass` but you did not provide `num_classes` argument\")\n+ if average == AverageMethod.WEIGHTED and len(torch.unique(target)) < num_classes:\n+ # If one or more classes has 0 observations, we should exclude them, as its weight will be 0\n+ target_bool_mat = torch.zeros((len(target), num_classes), dtype=bool)\n+ target_bool_mat[torch.arange(len(target)), target.long()] = 1\n+ class_observed = target_bool_mat.sum(axis=0) > 0\n+ for c in range(num_classes):\n+ if not class_observed[c]:\n+ warnings.warn(f'Class {c} had 0 observations, omitted from AUROC calculation', UserWarning)\n+ preds = preds[:, class_observed]\n+ target = target_bool_mat[:, class_observed]\n+ target = torch.where(target)[1]\n+ num_classes = class_observed.sum()\n+ if num_classes == 1:\n+ raise ValueError('Found 1 non-empty class in `multiclass` AUROC calculation')\n fpr, tpr, _ = roc(preds, target, num_classes, pos_label, sample_weights)\n \n # calculate standard roc auc score\n", "issue": "[metrics] AUROC Metric can't handle 0 observations of a class with multiclass classifier\nI'm attempting to calculate AUROC for a multiclass problem where some classes are very rare, occasionally never seen, and I'm getting the following error: `raise ValueError(\"No positive samples in targets, true positive value should be meaningless\")`\r\n\r\nIn the case of 0 observations, I feel the `average='weighted'` should work, since the contribution to the final AUROC should be 0 regardless. 
One can think of other scenarios where there are a very high number of classes, some of which will happen to not be seen in some dataset.\r\n\r\n_Originally posted by @BeyondTheProof in https://github.com/PyTorchLightning/pytorch-lightning/issues/2210#issuecomment-872440776_\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Optional, Sequence, Tuple\n\nimport torch\nfrom torch import Tensor, tensor\n\nfrom torchmetrics.functional.classification.auc import _auc_compute_without_check\nfrom torchmetrics.functional.classification.roc import roc\nfrom torchmetrics.utilities.checks import _input_format_classification\nfrom torchmetrics.utilities.enums import AverageMethod, DataType\nfrom torchmetrics.utilities.imports import _TORCH_LOWER_1_6\n\n\ndef _auroc_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor, DataType]:\n # use _input_format_classification for validating the input and get the mode of data\n _, _, mode = _input_format_classification(preds, target)\n\n if mode == 'multi class multi dim':\n n_classes = preds.shape[1]\n preds = preds.transpose(0, 1).reshape(n_classes, -1).transpose(0, 1)\n target = target.flatten()\n if mode == 'multi-label' and preds.ndim > 2:\n n_classes = preds.shape[1]\n preds = preds.transpose(0, 1).reshape(n_classes, -1).transpose(0, 1)\n target = target.transpose(0, 1).reshape(n_classes, -1).transpose(0, 1)\n\n return preds, target, mode\n\n\ndef _auroc_compute(\n preds: Tensor,\n target: Tensor,\n mode: DataType,\n num_classes: Optional[int] = None,\n pos_label: Optional[int] = None,\n average: Optional[str] = 'macro',\n max_fpr: Optional[float] = None,\n sample_weights: Optional[Sequence] = None,\n) -> Tensor:\n # binary mode override num_classes\n if mode == DataType.BINARY:\n num_classes = 1\n\n # check max_fpr parameter\n if max_fpr is not None:\n if not isinstance(max_fpr, float) and 0 < max_fpr <= 1:\n raise ValueError(f\"`max_fpr` should be a float in range (0, 1], got: {max_fpr}\")\n\n if _TORCH_LOWER_1_6:\n raise RuntimeError(\n \"`max_fpr` argument requires `torch.bucketize` which\"\n \" is not available below PyTorch version 1.6\"\n )\n\n # max_fpr parameter is only support for binary\n if mode != DataType.BINARY:\n raise ValueError(\n f\"Partial AUC computation not available in\"\n f\" multilabel/multiclass setting, 'max_fpr' must be\"\n f\" set to `None`, received `{max_fpr}`.\"\n )\n\n # calculate fpr, tpr\n if mode == DataType.MULTILABEL:\n if average == AverageMethod.MICRO:\n fpr, tpr, _ = roc(preds.flatten(), target.flatten(), 1, pos_label, sample_weights)\n elif num_classes:\n # for multilabel we iteratively evaluate roc in a binary fashion\n output = [\n roc(preds[:, i], target[:, i], num_classes=1, pos_label=1, sample_weights=sample_weights)\n for i in range(num_classes)\n ]\n fpr = [o[0] for o in output]\n tpr = [o[1] for o in output]\n else:\n raise ValueError('Detected input to be `multilabel` but you did not provide `num_classes` 
argument')\n else:\n if mode != DataType.BINARY and num_classes is None:\n raise ValueError('Detected input to `multiclass` but you did not provide `num_classes` argument')\n fpr, tpr, _ = roc(preds, target, num_classes, pos_label, sample_weights)\n\n # calculate standard roc auc score\n if max_fpr is None or max_fpr == 1:\n if mode == DataType.MULTILABEL and average == AverageMethod.MICRO:\n pass\n elif num_classes != 1:\n # calculate auc scores per class\n auc_scores = [_auc_compute_without_check(x, y, 1.0) for x, y in zip(fpr, tpr)]\n\n # calculate average\n if average == AverageMethod.NONE:\n return tensor(auc_scores)\n if average == AverageMethod.MACRO:\n return torch.mean(torch.stack(auc_scores))\n if average == AverageMethod.WEIGHTED:\n if mode == DataType.MULTILABEL:\n support = torch.sum(target, dim=0)\n else:\n support = torch.bincount(target.flatten(), minlength=num_classes)\n return torch.sum(torch.stack(auc_scores) * support / support.sum())\n\n allowed_average = (AverageMethod.NONE.value, AverageMethod.MACRO.value, AverageMethod.WEIGHTED.value)\n raise ValueError(\n f\"Argument `average` expected to be one of the following:\"\n f\" {allowed_average} but got {average}\"\n )\n\n return _auc_compute_without_check(fpr, tpr, 1.0)\n\n _device = fpr.device if isinstance(fpr, Tensor) else fpr[0].device\n max_area: Tensor = tensor(max_fpr, device=_device)\n # Add a single point at max_fpr and interpolate its tpr value\n stop = torch.bucketize(max_area, fpr, out_int32=True, right=True)\n weight = (max_area - fpr[stop - 1]) / (fpr[stop] - fpr[stop - 1])\n interp_tpr: Tensor = torch.lerp(tpr[stop - 1], tpr[stop], weight)\n tpr = torch.cat([tpr[:stop], interp_tpr.view(1)])\n fpr = torch.cat([fpr[:stop], max_area.view(1)])\n\n # Compute partial AUC\n partial_auc = _auc_compute_without_check(fpr, tpr, 1.0)\n\n # McClish correction: standardize result to be 0.5 if non-discriminant and 1 if maximal\n min_area: Tensor = 0.5 * max_area**2\n return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area))\n\n\ndef auroc(\n preds: Tensor,\n target: Tensor,\n num_classes: Optional[int] = None,\n pos_label: Optional[int] = None,\n average: Optional[str] = 'macro',\n max_fpr: Optional[float] = None,\n sample_weights: Optional[Sequence] = None,\n) -> Tensor:\n \"\"\" Compute `Area Under the Receiver Operating Characteristic Curve (ROC AUC)\n <https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Further_interpretations>`_\n\n Args:\n preds: predictions from model (logits or probabilities)\n target: Ground truth labels\n num_classes: integer with number of classes for multi-label and multiclass problems.\n Should be set to ``None`` for binary problems\n pos_label: integer determining the positive class. Default is ``None``\n which for binary problem is translate to 1. For multiclass problems\n this argument should not be set as we iteratively change it in the\n range [0,num_classes-1]\n average:\n - ``'micro'`` computes metric globally. Only works for multilabel problems\n - ``'macro'`` computes metric for each class and uniformly averages them\n - ``'weighted'`` computes metric for each class and does a weighted-average,\n where each class is weighted by their support (accounts for class imbalance)\n - ``None`` computes and returns the metric per class\n max_fpr:\n If not ``None``, calculates standardized partial AUC over the\n range [0, max_fpr]. 
Should be a float between 0 and 1.\n sample_weights: sample weights for each data point\n\n Raises:\n ValueError:\n If ``max_fpr`` is not a ``float`` in the range ``(0, 1]``.\n RuntimeError:\n If ``PyTorch version`` is ``below 1.6`` since max_fpr requires `torch.bucketize`\n which is not available below 1.6.\n ValueError:\n If ``max_fpr`` is not set to ``None`` and the mode is ``not binary``\n since partial AUC computation is not available in multilabel/multiclass.\n ValueError:\n If ``average`` is none of ``None``, ``\"macro\"`` or ``\"weighted\"``.\n\n Example (binary case):\n >>> from torchmetrics.functional import auroc\n >>> preds = torch.tensor([0.13, 0.26, 0.08, 0.19, 0.34])\n >>> target = torch.tensor([0, 0, 1, 1, 1])\n >>> auroc(preds, target, pos_label=1)\n tensor(0.5000)\n\n Example (multiclass case):\n >>> preds = torch.tensor([[0.90, 0.05, 0.05],\n ... [0.05, 0.90, 0.05],\n ... [0.05, 0.05, 0.90],\n ... [0.85, 0.05, 0.10],\n ... [0.10, 0.10, 0.80]])\n >>> target = torch.tensor([0, 1, 1, 2, 2])\n >>> auroc(preds, target, num_classes=3)\n tensor(0.7778)\n \"\"\"\n preds, target, mode = _auroc_update(preds, target)\n return _auroc_compute(preds, target, mode, num_classes, pos_label, average, max_fpr, sample_weights)\n", "path": "torchmetrics/functional/classification/auroc.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport warnings\nfrom typing import Optional, Sequence, Tuple\n\nimport torch\nfrom torch import Tensor, tensor\n\nfrom torchmetrics.functional.classification.auc import _auc_compute_without_check\nfrom torchmetrics.functional.classification.roc import roc\nfrom torchmetrics.utilities.checks import _input_format_classification\nfrom torchmetrics.utilities.enums import AverageMethod, DataType\nfrom torchmetrics.utilities.imports import _TORCH_LOWER_1_6\n\n\ndef _auroc_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor, DataType]:\n # use _input_format_classification for validating the input and get the mode of data\n _, _, mode = _input_format_classification(preds, target)\n\n if mode == 'multi class multi dim':\n n_classes = preds.shape[1]\n preds = preds.transpose(0, 1).reshape(n_classes, -1).transpose(0, 1)\n target = target.flatten()\n if mode == 'multi-label' and preds.ndim > 2:\n n_classes = preds.shape[1]\n preds = preds.transpose(0, 1).reshape(n_classes, -1).transpose(0, 1)\n target = target.transpose(0, 1).reshape(n_classes, -1).transpose(0, 1)\n\n return preds, target, mode\n\n\ndef _auroc_compute(\n preds: Tensor,\n target: Tensor,\n mode: DataType,\n num_classes: Optional[int] = None,\n pos_label: Optional[int] = None,\n average: Optional[str] = 'macro',\n max_fpr: Optional[float] = None,\n sample_weights: Optional[Sequence] = None,\n) -> Tensor:\n # binary mode override num_classes\n if mode == DataType.BINARY:\n num_classes = 1\n\n # check max_fpr parameter\n if max_fpr is not None:\n if not isinstance(max_fpr, float) and 0 < max_fpr <= 1:\n raise 
ValueError(f\"`max_fpr` should be a float in range (0, 1], got: {max_fpr}\")\n\n if _TORCH_LOWER_1_6:\n raise RuntimeError(\n \"`max_fpr` argument requires `torch.bucketize` which\"\n \" is not available below PyTorch version 1.6\"\n )\n\n # max_fpr parameter is only support for binary\n if mode != DataType.BINARY:\n raise ValueError(\n f\"Partial AUC computation not available in\"\n f\" multilabel/multiclass setting, 'max_fpr' must be\"\n f\" set to `None`, received `{max_fpr}`.\"\n )\n\n # calculate fpr, tpr\n if mode == DataType.MULTILABEL:\n if average == AverageMethod.MICRO:\n fpr, tpr, _ = roc(preds.flatten(), target.flatten(), 1, pos_label, sample_weights)\n elif num_classes:\n # for multilabel we iteratively evaluate roc in a binary fashion\n output = [\n roc(preds[:, i], target[:, i], num_classes=1, pos_label=1, sample_weights=sample_weights)\n for i in range(num_classes)\n ]\n fpr = [o[0] for o in output]\n tpr = [o[1] for o in output]\n else:\n raise ValueError('Detected input to be `multilabel` but you did not provide `num_classes` argument')\n else:\n if mode != DataType.BINARY:\n if num_classes is None:\n raise ValueError(\"Detected input to `multiclass` but you did not provide `num_classes` argument\")\n if average == AverageMethod.WEIGHTED and len(torch.unique(target)) < num_classes:\n # If one or more classes has 0 observations, we should exclude them, as its weight will be 0\n target_bool_mat = torch.zeros((len(target), num_classes), dtype=bool)\n target_bool_mat[torch.arange(len(target)), target.long()] = 1\n class_observed = target_bool_mat.sum(axis=0) > 0\n for c in range(num_classes):\n if not class_observed[c]:\n warnings.warn(f'Class {c} had 0 observations, omitted from AUROC calculation', UserWarning)\n preds = preds[:, class_observed]\n target = target_bool_mat[:, class_observed]\n target = torch.where(target)[1]\n num_classes = class_observed.sum()\n if num_classes == 1:\n raise ValueError('Found 1 non-empty class in `multiclass` AUROC calculation')\n fpr, tpr, _ = roc(preds, target, num_classes, pos_label, sample_weights)\n\n # calculate standard roc auc score\n if max_fpr is None or max_fpr == 1:\n if mode == DataType.MULTILABEL and average == AverageMethod.MICRO:\n pass\n elif num_classes != 1:\n # calculate auc scores per class\n auc_scores = [_auc_compute_without_check(x, y, 1.0) for x, y in zip(fpr, tpr)]\n\n # calculate average\n if average == AverageMethod.NONE:\n return tensor(auc_scores)\n if average == AverageMethod.MACRO:\n return torch.mean(torch.stack(auc_scores))\n if average == AverageMethod.WEIGHTED:\n if mode == DataType.MULTILABEL:\n support = torch.sum(target, dim=0)\n else:\n support = torch.bincount(target.flatten(), minlength=num_classes)\n return torch.sum(torch.stack(auc_scores) * support / support.sum())\n\n allowed_average = (AverageMethod.NONE.value, AverageMethod.MACRO.value, AverageMethod.WEIGHTED.value)\n raise ValueError(\n f\"Argument `average` expected to be one of the following:\"\n f\" {allowed_average} but got {average}\"\n )\n\n return _auc_compute_without_check(fpr, tpr, 1.0)\n\n _device = fpr.device if isinstance(fpr, Tensor) else fpr[0].device\n max_area: Tensor = tensor(max_fpr, device=_device)\n # Add a single point at max_fpr and interpolate its tpr value\n stop = torch.bucketize(max_area, fpr, out_int32=True, right=True)\n weight = (max_area - fpr[stop - 1]) / (fpr[stop] - fpr[stop - 1])\n interp_tpr: Tensor = torch.lerp(tpr[stop - 1], tpr[stop], weight)\n tpr = torch.cat([tpr[:stop], interp_tpr.view(1)])\n fpr = 
torch.cat([fpr[:stop], max_area.view(1)])\n\n # Compute partial AUC\n partial_auc = _auc_compute_without_check(fpr, tpr, 1.0)\n\n # McClish correction: standardize result to be 0.5 if non-discriminant and 1 if maximal\n min_area: Tensor = 0.5 * max_area**2\n return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area))\n\n\ndef auroc(\n preds: Tensor,\n target: Tensor,\n num_classes: Optional[int] = None,\n pos_label: Optional[int] = None,\n average: Optional[str] = 'macro',\n max_fpr: Optional[float] = None,\n sample_weights: Optional[Sequence] = None,\n) -> Tensor:\n \"\"\" Compute `Area Under the Receiver Operating Characteristic Curve (ROC AUC)\n <https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Further_interpretations>`_\n\n Args:\n preds: predictions from model (logits or probabilities)\n target: Ground truth labels\n num_classes: integer with number of classes for multi-label and multiclass problems.\n Should be set to ``None`` for binary problems\n pos_label: integer determining the positive class. Default is ``None``\n which for binary problem is translate to 1. For multiclass problems\n this argument should not be set as we iteratively change it in the\n range [0,num_classes-1]\n average:\n - ``'micro'`` computes metric globally. Only works for multilabel problems\n - ``'macro'`` computes metric for each class and uniformly averages them\n - ``'weighted'`` computes metric for each class and does a weighted-average,\n where each class is weighted by their support (accounts for class imbalance)\n - ``None`` computes and returns the metric per class\n max_fpr:\n If not ``None``, calculates standardized partial AUC over the\n range [0, max_fpr]. Should be a float between 0 and 1.\n sample_weights: sample weights for each data point\n\n Raises:\n ValueError:\n If ``max_fpr`` is not a ``float`` in the range ``(0, 1]``.\n RuntimeError:\n If ``PyTorch version`` is ``below 1.6`` since max_fpr requires `torch.bucketize`\n which is not available below 1.6.\n ValueError:\n If ``max_fpr`` is not set to ``None`` and the mode is ``not binary``\n since partial AUC computation is not available in multilabel/multiclass.\n ValueError:\n If ``average`` is none of ``None``, ``\"macro\"`` or ``\"weighted\"``.\n\n Example (binary case):\n >>> from torchmetrics.functional import auroc\n >>> preds = torch.tensor([0.13, 0.26, 0.08, 0.19, 0.34])\n >>> target = torch.tensor([0, 0, 1, 1, 1])\n >>> auroc(preds, target, pos_label=1)\n tensor(0.5000)\n\n Example (multiclass case):\n >>> preds = torch.tensor([[0.90, 0.05, 0.05],\n ... [0.05, 0.90, 0.05],\n ... [0.05, 0.05, 0.90],\n ... [0.85, 0.05, 0.10],\n ... [0.10, 0.10, 0.80]])\n >>> target = torch.tensor([0, 1, 1, 2, 2])\n >>> auroc(preds, target, num_classes=3)\n tensor(0.7778)\n \"\"\"\n preds, target, mode = _auroc_update(preds, target)\n return _auroc_compute(preds, target, mode, num_classes, pos_label, average, max_fpr, sample_weights)\n", "path": "torchmetrics/functional/classification/auroc.py"}]} | 3,211 | 486 |
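The patch above excludes classes with zero observations before the weighted average is taken. Below is a minimal standalone sketch of just that masking step; the helper name `drop_unobserved_classes` is hypothetical (it is not part of torchmetrics), and the example assumes only that `torch` is installed.

```python
import torch

def drop_unobserved_classes(preds: torch.Tensor, target: torch.Tensor, num_classes: int):
    # Build a one-hot boolean matrix of shape (n_samples, num_classes).
    one_hot = torch.zeros((len(target), num_classes), dtype=torch.bool)
    one_hot[torch.arange(len(target)), target.long()] = True
    observed = one_hot.sum(dim=0) > 0              # True for classes with >= 1 sample
    preds = preds[:, observed]                     # drop prediction columns of empty classes
    target = torch.where(one_hot[:, observed])[1]  # re-index labels to 0..k-1
    return preds, target, int(observed.sum())

# Class 2 never occurs, so it is removed and the labels are re-indexed.
preds = torch.rand(4, 3)
target = torch.tensor([0, 1, 1, 0])
p, t, k = drop_unobserved_classes(preds, target, num_classes=3)
print(p.shape, t, k)  # torch.Size([4, 2]) tensor([0, 1, 1, 0]) 2
```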
gh_patches_debug_57638 | rasdani/github-patches | git_diff | facebookresearch__ParlAI-1447 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error when running DrQA PyTorch 1.0.0
When running the basic example on SQUAD
```python examples/train_model.py -m drqa -t squad -bs 32```
Throwing this.
```[ training... ]
/content/DuReader/data/ParlAI/parlai/agents/drqa/layers.py:177: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.
alpha_flat = F.softmax(scores.view(-1, y.size(1)))
/content/DuReader/data/ParlAI/parlai/agents/drqa/layers.py:237: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.
alpha = F.softmax(scores)
/content/DuReader/data/ParlAI/parlai/agents/drqa/layers.py:210: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.
alpha = F.log_softmax(xWy)
Traceback (most recent call last):
File "examples/train_model.py", line 16, in <module>
TrainLoop(opt).train()
File "/content/DuReader/data/ParlAI/parlai/scripts/train_model.py", line 500, in train
world.parley()
File "/content/DuReader/data/ParlAI/parlai/core/worlds.py", line 641, in parley
batch_act = self.batch_act(agent_idx, batch_observations[agent_idx])
File "/content/DuReader/data/ParlAI/parlai/core/worlds.py", line 614, in batch_act
batch_actions = a.batch_act(batch_observation)
File "/content/DuReader/data/ParlAI/parlai/agents/drqa/drqa.py", line 227, in batch_act
self.model.update(batch)
File "/content/DuReader/data/ParlAI/parlai/agents/drqa/model.py", line 102, in update
self.train_loss.update(loss.data[0], ex[0].size(0))
IndexError: invalid index of a 0-dim tensor. Use tensor.item() to convert a 0-dim tensor to a Python number
```
I think this is related to the new version of PyTorch and the fix should be fairly trivial. Anyone working on fixing it soon? If not I could probably do a PR.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parlai/agents/drqa/model.py`
Content:
```
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6 import torch
7 import torch.optim as optim
8 import torch.nn.functional as F
9 import numpy as np
10 import logging
11
12 from torch.autograd import Variable
13 from .utils import load_embeddings, AverageMeter
14 from .rnn_reader import RnnDocReader
15
16 logger = logging.getLogger('DrQA')
17
18
19 class DocReaderModel(object):
20 """High level model that handles intializing the underlying network
21 architecture, saving, updating examples, and predicting examples.
22 """
23
24 def __init__(self, opt, word_dict, feature_dict, state_dict=None):
25 # Book-keeping.
26 self.opt = opt
27 self.word_dict = word_dict
28 self.feature_dict = feature_dict
29 self.updates = 0
30 self.train_loss = AverageMeter()
31
32 # Building network.
33 self.network = RnnDocReader(opt)
34 if state_dict:
35 new_state = set(self.network.state_dict().keys())
36 for k in list(state_dict['network'].keys()):
37 if k not in new_state:
38 del state_dict['network'][k]
39 self.network.load_state_dict(state_dict['network'])
40
41 # Building optimizer.
42 parameters = [p for p in self.network.parameters() if p.requires_grad]
43 if opt['optimizer'] == 'sgd':
44 self.optimizer = optim.SGD(parameters, opt['learning_rate'],
45 momentum=opt['momentum'],
46 weight_decay=opt['weight_decay'])
47 elif opt['optimizer'] == 'adamax':
48 self.optimizer = optim.Adamax(parameters,
49 weight_decay=opt['weight_decay'])
50 else:
51 raise RuntimeError('Unsupported optimizer: %s' % opt['optimizer'])
52
53 def set_embeddings(self):
54 # Read word embeddings.
55 if not self.opt.get('embedding_file'):
56 logger.warning('[ WARNING: No embeddings provided. '
57 'Keeping random initialization. ]')
58 return
59 logger.info('[ Loading pre-trained embeddings ]')
60 embeddings = load_embeddings(self.opt, self.word_dict)
61 logger.info('[ Num embeddings = %d ]' % embeddings.size(0))
62
63 # Sanity check dimensions
64 new_size = embeddings.size()
65 old_size = self.network.embedding.weight.size()
66 if new_size[1] != old_size[1]:
67 raise RuntimeError('Embedding dimensions do not match.')
68 if new_size[0] != old_size[0]:
69 logger.warning(
70 '[ WARNING: Number of embeddings changed (%d->%d) ]' %
71 (old_size[0], new_size[0])
72 )
73
74 # Swap weights
75 self.network.embedding.weight.data = embeddings
76
77 # If partially tuning the embeddings, keep the old values
78 if self.opt['tune_partial'] > 0:
79 if self.opt['tune_partial'] + 2 < embeddings.size(0):
80 fixed_embedding = embeddings[self.opt['tune_partial'] + 2:]
81 self.network.fixed_embedding = fixed_embedding
82
83 def update(self, ex):
84 # Train mode
85 self.network.train()
86
87 # Transfer to GPU
88 if self.opt['cuda']:
89 inputs = [Variable(e.cuda(non_blocking=True)) for e in ex[:5]]
90 target_s = Variable(ex[5].cuda(non_blocking=True))
91 target_e = Variable(ex[6].cuda(non_blocking=True))
92 else:
93 inputs = [Variable(e) for e in ex[:5]]
94 target_s = Variable(ex[5])
95 target_e = Variable(ex[6])
96
97 # Run forward
98 score_s, score_e = self.network(*inputs)
99
100 # Compute loss and accuracies
101 loss = F.nll_loss(score_s, target_s) + F.nll_loss(score_e, target_e)
102 self.train_loss.update(loss.data[0], ex[0].size(0))
103
104 # Clear gradients and run backward
105 self.optimizer.zero_grad()
106 loss.backward()
107
108 # Clip gradients
109 torch.nn.utils.clip_grad_norm(self.network.parameters(),
110 self.opt['grad_clipping'])
111
112 # Update parameters
113 self.optimizer.step()
114 self.updates += 1
115
116 # Reset any partially fixed parameters (e.g. rare words)
117 self.reset_parameters()
118
119 def predict(self, ex):
120 # Eval mode
121 self.network.eval()
122
123 # Transfer to GPU
124 if self.opt['cuda']:
125 inputs = [Variable(e.cuda(non_blocking=True), volatile=True)
126 for e in ex[:5]]
127 else:
128 inputs = [Variable(e, volatile=True) for e in ex[:5]]
129
130 # Run forward
131 score_s, score_e = self.network(*inputs)
132
133 # Transfer to CPU/normal tensors for numpy ops
134 score_s = score_s.data.cpu()
135 score_e = score_e.data.cpu()
136
137 # Get argmax text spans
138 text = ex[-2]
139 spans = ex[-1]
140 predictions = []
141 pred_scores = []
142 max_len = self.opt['max_len'] or score_s.size(1)
143 for i in range(score_s.size(0)):
144 scores = torch.ger(score_s[i], score_e[i])
145 scores.triu_().tril_(max_len - 1)
146 scores = scores.numpy()
147 s_idx, e_idx = np.unravel_index(np.argmax(scores), scores.shape)
148 s_offset, e_offset = spans[i][s_idx][0], spans[i][e_idx][1]
149 predictions.append(text[i][s_offset:e_offset])
150 pred_scores.append(np.max(scores))
151
152 return predictions, pred_scores
153
154 def reset_parameters(self):
155 # Reset fixed embeddings to original value
156 if self.opt['tune_partial'] > 0:
157 offset = self.opt['tune_partial'] + 2
158 if offset < self.network.embedding.weight.data.size(0):
159 self.network.embedding.weight.data[offset:] \
160 = self.network.fixed_embedding
161
162 def save(self, filename):
163 params = {
164 'state_dict': {
165 'network': self.network.state_dict(),
166 },
167 'feature_dict': self.feature_dict,
168 'config': self.opt,
169 }
170 try:
171 torch.save(params, filename)
172 except BaseException:
173 logger.warn('[ WARN: Saving failed... continuing anyway. ]')
174
175 def cuda(self):
176 self.network.cuda()
177
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parlai/agents/drqa/model.py b/parlai/agents/drqa/model.py
--- a/parlai/agents/drqa/model.py
+++ b/parlai/agents/drqa/model.py
@@ -99,7 +99,7 @@
# Compute loss and accuracies
loss = F.nll_loss(score_s, target_s) + F.nll_loss(score_e, target_e)
- self.train_loss.update(loss.data[0], ex[0].size(0))
+ self.train_loss.update(loss.data.item(), ex[0].size(0))
# Clear gradients and run backward
self.optimizer.zero_grad()
| {"golden_diff": "diff --git a/parlai/agents/drqa/model.py b/parlai/agents/drqa/model.py\n--- a/parlai/agents/drqa/model.py\n+++ b/parlai/agents/drqa/model.py\n@@ -99,7 +99,7 @@\n \n # Compute loss and accuracies\n loss = F.nll_loss(score_s, target_s) + F.nll_loss(score_e, target_e)\n- self.train_loss.update(loss.data[0], ex[0].size(0))\n+ self.train_loss.update(loss.data.item(), ex[0].size(0))\n \n # Clear gradients and run backward\n self.optimizer.zero_grad()\n", "issue": "Error when running DrQA PyTorch 1.0.0\nWhen running the basic example on SQUAD \r\n```python examples/train_model.py -m drqa -t squad -bs 32```\r\nThrowing this. \r\n```[ training... ]\r\n/content/DuReader/data/ParlAI/parlai/agents/drqa/layers.py:177: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\r\n alpha_flat = F.softmax(scores.view(-1, y.size(1)))\r\n/content/DuReader/data/ParlAI/parlai/agents/drqa/layers.py:237: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\r\n alpha = F.softmax(scores)\r\n/content/DuReader/data/ParlAI/parlai/agents/drqa/layers.py:210: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.\r\n alpha = F.log_softmax(xWy)\r\nTraceback (most recent call last):\r\n File \"examples/train_model.py\", line 16, in <module>\r\n TrainLoop(opt).train()\r\n File \"/content/DuReader/data/ParlAI/parlai/scripts/train_model.py\", line 500, in train\r\n world.parley()\r\n File \"/content/DuReader/data/ParlAI/parlai/core/worlds.py\", line 641, in parley\r\n batch_act = self.batch_act(agent_idx, batch_observations[agent_idx])\r\n File \"/content/DuReader/data/ParlAI/parlai/core/worlds.py\", line 614, in batch_act\r\n batch_actions = a.batch_act(batch_observation)\r\n File \"/content/DuReader/data/ParlAI/parlai/agents/drqa/drqa.py\", line 227, in batch_act\r\n self.model.update(batch)\r\n File \"/content/DuReader/data/ParlAI/parlai/agents/drqa/model.py\", line 102, in update\r\n self.train_loss.update(loss.data[0], ex[0].size(0))\r\nIndexError: invalid index of a 0-dim tensor. Use tensor.item() to convert a 0-dim tensor to a Python number\r\n```\r\nI think this is related to the new version of PyTorch and the fix should be fairly trivial. Anyone working on fixing it soon? If not I could probably do a PR. \n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nimport torch\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport numpy as np\nimport logging\n\nfrom torch.autograd import Variable\nfrom .utils import load_embeddings, AverageMeter\nfrom .rnn_reader import RnnDocReader\n\nlogger = logging.getLogger('DrQA')\n\n\nclass DocReaderModel(object):\n \"\"\"High level model that handles intializing the underlying network\n architecture, saving, updating examples, and predicting examples.\n \"\"\"\n\n def __init__(self, opt, word_dict, feature_dict, state_dict=None):\n # Book-keeping.\n self.opt = opt\n self.word_dict = word_dict\n self.feature_dict = feature_dict\n self.updates = 0\n self.train_loss = AverageMeter()\n\n # Building network.\n self.network = RnnDocReader(opt)\n if state_dict:\n new_state = set(self.network.state_dict().keys())\n for k in list(state_dict['network'].keys()):\n if k not in new_state:\n del state_dict['network'][k]\n self.network.load_state_dict(state_dict['network'])\n\n # Building optimizer.\n parameters = [p for p in self.network.parameters() if p.requires_grad]\n if opt['optimizer'] == 'sgd':\n self.optimizer = optim.SGD(parameters, opt['learning_rate'],\n momentum=opt['momentum'],\n weight_decay=opt['weight_decay'])\n elif opt['optimizer'] == 'adamax':\n self.optimizer = optim.Adamax(parameters,\n weight_decay=opt['weight_decay'])\n else:\n raise RuntimeError('Unsupported optimizer: %s' % opt['optimizer'])\n\n def set_embeddings(self):\n # Read word embeddings.\n if not self.opt.get('embedding_file'):\n logger.warning('[ WARNING: No embeddings provided. '\n 'Keeping random initialization. ]')\n return\n logger.info('[ Loading pre-trained embeddings ]')\n embeddings = load_embeddings(self.opt, self.word_dict)\n logger.info('[ Num embeddings = %d ]' % embeddings.size(0))\n\n # Sanity check dimensions\n new_size = embeddings.size()\n old_size = self.network.embedding.weight.size()\n if new_size[1] != old_size[1]:\n raise RuntimeError('Embedding dimensions do not match.')\n if new_size[0] != old_size[0]:\n logger.warning(\n '[ WARNING: Number of embeddings changed (%d->%d) ]' %\n (old_size[0], new_size[0])\n )\n\n # Swap weights\n self.network.embedding.weight.data = embeddings\n\n # If partially tuning the embeddings, keep the old values\n if self.opt['tune_partial'] > 0:\n if self.opt['tune_partial'] + 2 < embeddings.size(0):\n fixed_embedding = embeddings[self.opt['tune_partial'] + 2:]\n self.network.fixed_embedding = fixed_embedding\n\n def update(self, ex):\n # Train mode\n self.network.train()\n\n # Transfer to GPU\n if self.opt['cuda']:\n inputs = [Variable(e.cuda(non_blocking=True)) for e in ex[:5]]\n target_s = Variable(ex[5].cuda(non_blocking=True))\n target_e = Variable(ex[6].cuda(non_blocking=True))\n else:\n inputs = [Variable(e) for e in ex[:5]]\n target_s = Variable(ex[5])\n target_e = Variable(ex[6])\n\n # Run forward\n score_s, score_e = self.network(*inputs)\n\n # Compute loss and accuracies\n loss = F.nll_loss(score_s, target_s) + F.nll_loss(score_e, target_e)\n self.train_loss.update(loss.data[0], ex[0].size(0))\n\n # Clear gradients and run backward\n self.optimizer.zero_grad()\n loss.backward()\n\n # Clip gradients\n torch.nn.utils.clip_grad_norm(self.network.parameters(),\n self.opt['grad_clipping'])\n\n # Update parameters\n self.optimizer.step()\n self.updates += 1\n\n # Reset any partially fixed parameters (e.g. 
rare words)\n self.reset_parameters()\n\n def predict(self, ex):\n # Eval mode\n self.network.eval()\n\n # Transfer to GPU\n if self.opt['cuda']:\n inputs = [Variable(e.cuda(non_blocking=True), volatile=True)\n for e in ex[:5]]\n else:\n inputs = [Variable(e, volatile=True) for e in ex[:5]]\n\n # Run forward\n score_s, score_e = self.network(*inputs)\n\n # Transfer to CPU/normal tensors for numpy ops\n score_s = score_s.data.cpu()\n score_e = score_e.data.cpu()\n\n # Get argmax text spans\n text = ex[-2]\n spans = ex[-1]\n predictions = []\n pred_scores = []\n max_len = self.opt['max_len'] or score_s.size(1)\n for i in range(score_s.size(0)):\n scores = torch.ger(score_s[i], score_e[i])\n scores.triu_().tril_(max_len - 1)\n scores = scores.numpy()\n s_idx, e_idx = np.unravel_index(np.argmax(scores), scores.shape)\n s_offset, e_offset = spans[i][s_idx][0], spans[i][e_idx][1]\n predictions.append(text[i][s_offset:e_offset])\n pred_scores.append(np.max(scores))\n\n return predictions, pred_scores\n\n def reset_parameters(self):\n # Reset fixed embeddings to original value\n if self.opt['tune_partial'] > 0:\n offset = self.opt['tune_partial'] + 2\n if offset < self.network.embedding.weight.data.size(0):\n self.network.embedding.weight.data[offset:] \\\n = self.network.fixed_embedding\n\n def save(self, filename):\n params = {\n 'state_dict': {\n 'network': self.network.state_dict(),\n },\n 'feature_dict': self.feature_dict,\n 'config': self.opt,\n }\n try:\n torch.save(params, filename)\n except BaseException:\n logger.warn('[ WARN: Saving failed... continuing anyway. ]')\n\n def cuda(self):\n self.network.cuda()\n", "path": "parlai/agents/drqa/model.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nimport torch\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport numpy as np\nimport logging\n\nfrom torch.autograd import Variable\nfrom .utils import load_embeddings, AverageMeter\nfrom .rnn_reader import RnnDocReader\n\nlogger = logging.getLogger('DrQA')\n\n\nclass DocReaderModel(object):\n \"\"\"High level model that handles intializing the underlying network\n architecture, saving, updating examples, and predicting examples.\n \"\"\"\n\n def __init__(self, opt, word_dict, feature_dict, state_dict=None):\n # Book-keeping.\n self.opt = opt\n self.word_dict = word_dict\n self.feature_dict = feature_dict\n self.updates = 0\n self.train_loss = AverageMeter()\n\n # Building network.\n self.network = RnnDocReader(opt)\n if state_dict:\n new_state = set(self.network.state_dict().keys())\n for k in list(state_dict['network'].keys()):\n if k not in new_state:\n del state_dict['network'][k]\n self.network.load_state_dict(state_dict['network'])\n\n # Building optimizer.\n parameters = [p for p in self.network.parameters() if p.requires_grad]\n if opt['optimizer'] == 'sgd':\n self.optimizer = optim.SGD(parameters, opt['learning_rate'],\n momentum=opt['momentum'],\n weight_decay=opt['weight_decay'])\n elif opt['optimizer'] == 'adamax':\n self.optimizer = optim.Adamax(parameters,\n weight_decay=opt['weight_decay'])\n else:\n raise RuntimeError('Unsupported optimizer: %s' % opt['optimizer'])\n\n def set_embeddings(self):\n # Read word embeddings.\n if not self.opt.get('embedding_file'):\n logger.warning('[ WARNING: No embeddings provided. '\n 'Keeping random initialization. 
]')\n return\n logger.info('[ Loading pre-trained embeddings ]')\n embeddings = load_embeddings(self.opt, self.word_dict)\n logger.info('[ Num embeddings = %d ]' % embeddings.size(0))\n\n # Sanity check dimensions\n new_size = embeddings.size()\n old_size = self.network.embedding.weight.size()\n if new_size[1] != old_size[1]:\n raise RuntimeError('Embedding dimensions do not match.')\n if new_size[0] != old_size[0]:\n logger.warning(\n '[ WARNING: Number of embeddings changed (%d->%d) ]' %\n (old_size[0], new_size[0])\n )\n\n # Swap weights\n self.network.embedding.weight.data = embeddings\n\n # If partially tuning the embeddings, keep the old values\n if self.opt['tune_partial'] > 0:\n if self.opt['tune_partial'] + 2 < embeddings.size(0):\n fixed_embedding = embeddings[self.opt['tune_partial'] + 2:]\n self.network.fixed_embedding = fixed_embedding\n\n def update(self, ex):\n # Train mode\n self.network.train()\n\n # Transfer to GPU\n if self.opt['cuda']:\n inputs = [Variable(e.cuda(non_blocking=True)) for e in ex[:5]]\n target_s = Variable(ex[5].cuda(non_blocking=True))\n target_e = Variable(ex[6].cuda(non_blocking=True))\n else:\n inputs = [Variable(e) for e in ex[:5]]\n target_s = Variable(ex[5])\n target_e = Variable(ex[6])\n\n # Run forward\n score_s, score_e = self.network(*inputs)\n\n # Compute loss and accuracies\n loss = F.nll_loss(score_s, target_s) + F.nll_loss(score_e, target_e)\n self.train_loss.update(loss.data.item(), ex[0].size(0))\n\n # Clear gradients and run backward\n self.optimizer.zero_grad()\n loss.backward()\n\n # Clip gradients\n torch.nn.utils.clip_grad_norm(self.network.parameters(),\n self.opt['grad_clipping'])\n\n # Update parameters\n self.optimizer.step()\n self.updates += 1\n\n # Reset any partially fixed parameters (e.g. rare words)\n self.reset_parameters()\n\n def predict(self, ex):\n # Eval mode\n self.network.eval()\n\n # Transfer to GPU\n if self.opt['cuda']:\n inputs = [Variable(e.cuda(non_blocking=True), volatile=True)\n for e in ex[:5]]\n else:\n inputs = [Variable(e, volatile=True) for e in ex[:5]]\n\n # Run forward\n score_s, score_e = self.network(*inputs)\n\n # Transfer to CPU/normal tensors for numpy ops\n score_s = score_s.data.cpu()\n score_e = score_e.data.cpu()\n\n # Get argmax text spans\n text = ex[-2]\n spans = ex[-1]\n predictions = []\n pred_scores = []\n max_len = self.opt['max_len'] or score_s.size(1)\n for i in range(score_s.size(0)):\n scores = torch.ger(score_s[i], score_e[i])\n scores.triu_().tril_(max_len - 1)\n scores = scores.numpy()\n s_idx, e_idx = np.unravel_index(np.argmax(scores), scores.shape)\n s_offset, e_offset = spans[i][s_idx][0], spans[i][e_idx][1]\n predictions.append(text[i][s_offset:e_offset])\n pred_scores.append(np.max(scores))\n\n return predictions, pred_scores\n\n def reset_parameters(self):\n # Reset fixed embeddings to original value\n if self.opt['tune_partial'] > 0:\n offset = self.opt['tune_partial'] + 2\n if offset < self.network.embedding.weight.data.size(0):\n self.network.embedding.weight.data[offset:] \\\n = self.network.fixed_embedding\n\n def save(self, filename):\n params = {\n 'state_dict': {\n 'network': self.network.state_dict(),\n },\n 'feature_dict': self.feature_dict,\n 'config': self.opt,\n }\n try:\n torch.save(params, filename)\n except BaseException:\n logger.warn('[ WARN: Saving failed... continuing anyway. ]')\n\n def cuda(self):\n self.network.cuda()\n", "path": "parlai/agents/drqa/model.py"}]} | 2,606 | 149 |
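The fix above replaces `loss.data[0]` with `loss.data.item()`. A short sketch of why the old indexing fails, assuming only that `torch` is installed: on PyTorch 0.4 and later (including the 1.0.0 from the traceback), a reduced loss is a 0-dim tensor, so there is no index `0` to take.

```python
import torch
import torch.nn.functional as F

log_probs = F.log_softmax(torch.randn(8, 5), dim=1)
target = torch.randint(0, 5, (8,))
loss = F.nll_loss(log_probs, target)   # default reduction gives a 0-dim tensor

print(loss.dim())   # 0 -- indexing with [0] raises the IndexError from the issue
print(loss.item())  # scalar extraction, equivalent to the patch's loss.data.item()
# loss.data[0]      # IndexError: invalid index of a 0-dim tensor
```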
gh_patches_debug_37093 | rasdani/github-patches | git_diff | TheAlgorithms__Python-2221 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
maths/number_of_digits.py is naive!
<code>[maths/number_of_digits.py](https://github.com/TheAlgorithms/Python/blob/master/maths/number_of_digits.py)</code> has a naive method. The suggested method has a time complexity of O(n). We can count the number of digits of a number in O(1).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `maths/number_of_digits.py`
Content:
```
1 def num_digits(n: int) -> int:
2 """
3 Find the number of digits in a number.
4
5 >>> num_digits(12345)
6 5
7 >>> num_digits(123)
8 3
9 """
10 digits = 0
11 while n > 0:
12 n = n // 10
13 digits += 1
14 return digits
15
16
17 if __name__ == "__main__":
18 print(num_digits(12345)) # ===> 5
19
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/maths/number_of_digits.py b/maths/number_of_digits.py
--- a/maths/number_of_digits.py
+++ b/maths/number_of_digits.py
@@ -1,3 +1,7 @@
+import math
+from timeit import timeit
+
+
def num_digits(n: int) -> int:
"""
Find the number of digits in a number.
@@ -14,5 +18,82 @@
return digits
+def num_digits_fast(n: int) -> int:
+ """
+ Find the number of digits in a number.
+ abs() is used as logarithm for negative numbers is not defined.
+
+ >>> num_digits_fast(12345)
+ 5
+ >>> num_digits_fast(123)
+ 3
+ """
+ return (math.floor(math.log(abs(n), 10) + 1))
+
+
+def num_digits_faster(n: int) -> int:
+ """
+ Find the number of digits in a number.
+ abs() is used for negative numbers
+
+ >>> num_digits_faster(12345)
+ 5
+ >>> num_digits_faster(123)
+ 3
+ """
+ return (len(str(abs(n))))
+
+
+def benchmark() -> None:
+ """
+ Benchmark code for comparing 3 functions,
+ with 3 different length int values.
+ """
+ print('\nFor small_num = ', small_num, ':')
+ print("> num_digits()",
+ '\t\tans =', num_digits(small_num),
+ '\ttime =', timeit("z.num_digits(z.small_num)",
+ setup="import __main__ as z"), "seconds")
+ print("> num_digits_fast()",
+ '\tans =', num_digits_fast(small_num),
+ '\ttime =', timeit("z.num_digits_fast(z.small_num)",
+ setup="import __main__ as z"), "seconds")
+ print("> num_digits_faster()",
+ '\tans =', num_digits_faster(small_num),
+ '\ttime =', timeit("z.num_digits_faster(z.small_num)",
+ setup="import __main__ as z"), "seconds")
+
+ print('\nFor medium_num = ', medium_num, ':')
+ print("> num_digits()",
+ '\t\tans =', num_digits(medium_num),
+ '\ttime =', timeit("z.num_digits(z.medium_num)",
+ setup="import __main__ as z"), "seconds")
+ print("> num_digits_fast()",
+ '\tans =', num_digits_fast(medium_num),
+ '\ttime =', timeit("z.num_digits_fast(z.medium_num)",
+ setup="import __main__ as z"), "seconds")
+ print("> num_digits_faster()",
+ '\tans =', num_digits_faster(medium_num),
+ '\ttime =', timeit("z.num_digits_faster(z.medium_num)",
+ setup="import __main__ as z"), "seconds")
+
+ print('\nFor large_num = ', large_num, ':')
+ print("> num_digits()",
+ '\t\tans =', num_digits(large_num),
+ '\ttime =', timeit("z.num_digits(z.large_num)",
+ setup="import __main__ as z"), "seconds")
+ print("> num_digits_fast()",
+ '\tans =', num_digits_fast(large_num),
+ '\ttime =', timeit("z.num_digits_fast(z.large_num)",
+ setup="import __main__ as z"), "seconds")
+ print("> num_digits_faster()",
+ '\tans =', num_digits_faster(large_num),
+ '\ttime =', timeit("z.num_digits_faster(z.large_num)",
+ setup="import __main__ as z"), "seconds")
+
+
if __name__ == "__main__":
- print(num_digits(12345)) # ===> 5
+ small_num = 262144
+ medium_num = 1125899906842624
+ large_num = 1267650600228229401496703205376
+ benchmark()
| {"golden_diff": "diff --git a/maths/number_of_digits.py b/maths/number_of_digits.py\n--- a/maths/number_of_digits.py\n+++ b/maths/number_of_digits.py\n@@ -1,3 +1,7 @@\n+import math\n+from timeit import timeit\n+\n+\n def num_digits(n: int) -> int:\n \"\"\"\n Find the number of digits in a number.\n@@ -14,5 +18,82 @@\n return digits\n \n \n+def num_digits_fast(n: int) -> int:\n+ \"\"\"\n+ Find the number of digits in a number.\n+ abs() is used as logarithm for negative numbers is not defined.\n+\n+ >>> num_digits_fast(12345)\n+ 5\n+ >>> num_digits_fast(123)\n+ 3\n+ \"\"\"\n+ return (math.floor(math.log(abs(n), 10) + 1))\n+\n+\n+def num_digits_faster(n: int) -> int:\n+ \"\"\"\n+ Find the number of digits in a number.\n+ abs() is used for negative numbers\n+\n+ >>> num_digits_faster(12345)\n+ 5\n+ >>> num_digits_faster(123)\n+ 3\n+ \"\"\"\n+ return (len(str(abs(n))))\n+\n+\n+def benchmark() -> None:\n+ \"\"\"\n+ Benchmark code for comparing 3 functions,\n+ with 3 different length int values.\n+ \"\"\"\n+ print('\\nFor small_num = ', small_num, ':')\n+ print(\"> num_digits()\",\n+ '\\t\\tans =', num_digits(small_num),\n+ '\\ttime =', timeit(\"z.num_digits(z.small_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+ print(\"> num_digits_fast()\",\n+ '\\tans =', num_digits_fast(small_num),\n+ '\\ttime =', timeit(\"z.num_digits_fast(z.small_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+ print(\"> num_digits_faster()\",\n+ '\\tans =', num_digits_faster(small_num),\n+ '\\ttime =', timeit(\"z.num_digits_faster(z.small_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+\n+ print('\\nFor medium_num = ', medium_num, ':')\n+ print(\"> num_digits()\",\n+ '\\t\\tans =', num_digits(medium_num),\n+ '\\ttime =', timeit(\"z.num_digits(z.medium_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+ print(\"> num_digits_fast()\",\n+ '\\tans =', num_digits_fast(medium_num),\n+ '\\ttime =', timeit(\"z.num_digits_fast(z.medium_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+ print(\"> num_digits_faster()\",\n+ '\\tans =', num_digits_faster(medium_num),\n+ '\\ttime =', timeit(\"z.num_digits_faster(z.medium_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+\n+ print('\\nFor large_num = ', large_num, ':')\n+ print(\"> num_digits()\",\n+ '\\t\\tans =', num_digits(large_num),\n+ '\\ttime =', timeit(\"z.num_digits(z.large_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+ print(\"> num_digits_fast()\",\n+ '\\tans =', num_digits_fast(large_num),\n+ '\\ttime =', timeit(\"z.num_digits_fast(z.large_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+ print(\"> num_digits_faster()\",\n+ '\\tans =', num_digits_faster(large_num),\n+ '\\ttime =', timeit(\"z.num_digits_faster(z.large_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+\n+\n if __name__ == \"__main__\":\n- print(num_digits(12345)) # ===> 5\n+ small_num = 262144\n+ medium_num = 1125899906842624\n+ large_num = 1267650600228229401496703205376\n+ benchmark()\n", "issue": "maths/number_of_digits.py is naive!\n<code>[maths/number_of_digits.py](https://github.com/TheAlgorithms/Python/blob/master/maths/number_of_digits.py)</code> has a naive method. The suggested method has a time complexity of O(n). 
We can count number of digits of a number in O(1).\n", "before_files": [{"content": "def num_digits(n: int) -> int:\n \"\"\"\n Find the number of digits in a number.\n\n >>> num_digits(12345)\n 5\n >>> num_digits(123)\n 3\n \"\"\"\n digits = 0\n while n > 0:\n n = n // 10\n digits += 1\n return digits\n\n\nif __name__ == \"__main__\":\n print(num_digits(12345)) # ===> 5\n", "path": "maths/number_of_digits.py"}], "after_files": [{"content": "import math\nfrom timeit import timeit\n\n\ndef num_digits(n: int) -> int:\n \"\"\"\n Find the number of digits in a number.\n\n >>> num_digits(12345)\n 5\n >>> num_digits(123)\n 3\n \"\"\"\n digits = 0\n while n > 0:\n n = n // 10\n digits += 1\n return digits\n\n\ndef num_digits_fast(n: int) -> int:\n \"\"\"\n Find the number of digits in a number.\n abs() is used as logarithm for negative numbers is not defined.\n\n >>> num_digits_fast(12345)\n 5\n >>> num_digits_fast(123)\n 3\n \"\"\"\n return (math.floor(math.log(abs(n), 10) + 1))\n\n\ndef num_digits_faster(n: int) -> int:\n \"\"\"\n Find the number of digits in a number.\n abs() is used for negative numbers\n\n >>> num_digits_faster(12345)\n 5\n >>> num_digits_faster(123)\n 3\n \"\"\"\n return (len(str(abs(n))))\n\n\ndef benchmark() -> None:\n \"\"\"\n Benchmark code for comparing 3 functions,\n with 3 different length int values.\n \"\"\"\n print('\\nFor small_num = ', small_num, ':')\n print(\"> num_digits()\",\n '\\t\\tans =', num_digits(small_num),\n '\\ttime =', timeit(\"z.num_digits(z.small_num)\",\n setup=\"import __main__ as z\"), \"seconds\")\n print(\"> num_digits_fast()\",\n '\\tans =', num_digits_fast(small_num),\n '\\ttime =', timeit(\"z.num_digits_fast(z.small_num)\",\n setup=\"import __main__ as z\"), \"seconds\")\n print(\"> num_digits_faster()\",\n '\\tans =', num_digits_faster(small_num),\n '\\ttime =', timeit(\"z.num_digits_faster(z.small_num)\",\n setup=\"import __main__ as z\"), \"seconds\")\n\n print('\\nFor medium_num = ', medium_num, ':')\n print(\"> num_digits()\",\n '\\t\\tans =', num_digits(medium_num),\n '\\ttime =', timeit(\"z.num_digits(z.medium_num)\",\n setup=\"import __main__ as z\"), \"seconds\")\n print(\"> num_digits_fast()\",\n '\\tans =', num_digits_fast(medium_num),\n '\\ttime =', timeit(\"z.num_digits_fast(z.medium_num)\",\n setup=\"import __main__ as z\"), \"seconds\")\n print(\"> num_digits_faster()\",\n '\\tans =', num_digits_faster(medium_num),\n '\\ttime =', timeit(\"z.num_digits_faster(z.medium_num)\",\n setup=\"import __main__ as z\"), \"seconds\")\n\n print('\\nFor large_num = ', large_num, ':')\n print(\"> num_digits()\",\n '\\t\\tans =', num_digits(large_num),\n '\\ttime =', timeit(\"z.num_digits(z.large_num)\",\n setup=\"import __main__ as z\"), \"seconds\")\n print(\"> num_digits_fast()\",\n '\\tans =', num_digits_fast(large_num),\n '\\ttime =', timeit(\"z.num_digits_fast(z.large_num)\",\n setup=\"import __main__ as z\"), \"seconds\")\n print(\"> num_digits_faster()\",\n '\\tans =', num_digits_faster(large_num),\n '\\ttime =', timeit(\"z.num_digits_faster(z.large_num)\",\n setup=\"import __main__ as z\"), \"seconds\")\n\n\nif __name__ == \"__main__\":\n small_num = 262144\n medium_num = 1125899906842624\n large_num = 1267650600228229401496703205376\n benchmark()\n", "path": "maths/number_of_digits.py"}]} | 474 | 977 |
gh_patches_debug_1975 | rasdani/github-patches | git_diff | jupyter__docker-stacks-1859 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] - Health Check fails if you change the port jupyter runs on
### What docker image(s) are you using?
minimal-notebook
### OS system and architecture running docker image
RHEL7 docker swarm
### What Docker command are you running?
Not really relevant, but I need to run it in a docker swarm, with a generalised 'ingress service'.
For this, the internal port Jupyter runs on needs to be changed for integration into an 'ingress proxy'.
To change the port I made a slight modification to the docker image to set the internal port it runs on (see below).
The problem is the docker container dies unexpectedly after running for 46 seconds.
During that time the service is visible within the container, but not externally.
This is because the built-in healthcheck never succeeds, and eventually kills the container with little logged reporting.
(see below)
### How to Reproduce the problem?
Dockerfile, to set port
```dockerfile
FROM "jupyter/minimal-notebook:latest"
# Update Jupyter configuration to set port
RUN set -eux; \
sed -i 's/port = 8888/port = 8080/' /etc/jupyter/jupyter_notebook_config.py ;\
sed -i 's/port = 8888/port = 8080/' /etc/jupyter/jupyter_server_config.py ;\
:;
```
You can also change the port in other ways, such as...
Creating a `~jovyan/.jupyter/jupyter_server_config.py` file (which can also set a password; see the sketch below),
or setting a JUPYTER_PORT environment variable (IF the setting in the `/etc/jupyter` configs is removed).
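
For instance, a user-level override might look like this (a sketch only; `c` is the config object Jupyter injects when it loads the file, just as in the `/etc/jupyter` config shipped with the image):

```python
# ~jovyan/.jupyter/jupyter_server_config.py  (sketch of a user-level override)
c = get_config()  # noqa: F821  (provided by Jupyter at config-load time)
c.ServerApp.port = 8080
```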
### Command output
When you build and then run the modified docker image, `docker ps` reports
`Up 9 seconds (health: starting) 8888/tcp` despite the fact that jupyter is now running on port 8080
46 seconds after starting, the container dies with an unhelpful (Signal 15)
Log output...
```
[I 2022-10-28 05:20:00.393 ServerApp] Jupyter Server 1.21.0 is running at:
[I 2022-10-28 05:20:00.393 ServerApp] http://jupyter_service:8080/lab?token=929eaaf2e60f8a947761cb1f2741d745fd46dde62f6fef7c
or http://127.0.0.1:8080/lab?token=929eaaf2e60f8a947761cb1f2741d745fd46dde62f6fef7c
[I 2022-10-28 05:20:00.393 ServerApp] Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).
[C 2022-10-28 05:20:00.397 ServerApp]
To access the server, open this file in a browser:
file:///home/jovyan/.local/share/jupyter/runtime/jpserver-8-open.html
Or copy and paste one of these URLs:
http://jupyter_service:8080/lab?token=929eaaf2e60f8a947761cb1f2741d745fd46dde62f6fef7c
or http://127.0.0.1:8080/lab?token=929eaaf2e60f8a947761cb1f2741d745fd46dde62f6fef7c
Entered start.sh with args: jupyter lab
Executing the command: jupyter lab
[C 2022-10-28 05:20:46.261 ServerApp] received signal 15, stopping
[I 2022-10-28 05:20:46.262 ServerApp] Shutting down 2 extensions
[I 2022-10-28 05:20:46.263 ServerApp] Shutting down 0 terminals
```
### Expected behavior
Changing the internal port should not take days of work to track down; it should be straightforward and documented.
The healthcheck should also be properly documented in jupyter-stacks documentation.
This will make it more 'swarm friendly' as well as allow others to integrate it better when port 8888 is NOT available.
Yes you can map the port when doing a 'docker run', but that is NOT always possible.
### Actual behavior
Internal Port changing is undocumented in stacks
The healthcheck kills the container without notice (signal 15 hardly makes it clear) when the port is different.
Days of work were lost trying to figure out what should be a straightforward and simple task.
### Anything else?
There is an existing environment variable "JUPYTER_PORT" that defines the default port.
But any such setting is currently overridden by the configuration files in `/etc/jupyter`
This may be usable to parameterise the healthcheck, especially if the config-file default is removed or allows the env var to override it.
in Dockerfile....
```
HEALTHCHECK --interval=15s --timeout=3s --start-period=5s --retries=3 \
CMD wget -O- --no-verbose --tries=1 --no-check-certificate \
http${GEN_CERT:+s}://localhost:${JUPYTER_PORT:-8888}${JUPYTERHUB_SERVICE_PREFIX:-/}api || exit 1
```
That environment variable also needs to be documented in the jupyter-stacks documentation, along with the health check.
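
For example, if the hard-coded port were removed from the `/etc/jupyter` config files so that `JUPYTER_PORT` actually took effect, a downstream image could (hypothetically) be as simple as:

```dockerfile
FROM "jupyter/minimal-notebook:latest"
# Hypothetical: assumes the base image honours JUPYTER_PORT for both the server and the healthcheck
ENV JUPYTER_PORT=8080
EXPOSE 8080
```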
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `base-notebook/jupyter_server_config.py`
Content:
```
1 # Copyright (c) Jupyter Development Team.
2 # Distributed under the terms of the Modified BSD License.
3 # mypy: ignore-errors
4 import os
5 import stat
6 import subprocess
7
8 from jupyter_core.paths import jupyter_data_dir
9
10 c = get_config() # noqa: F821
11 c.ServerApp.ip = "0.0.0.0"
12 c.ServerApp.port = 8888
13 c.ServerApp.open_browser = False
14
15 # to output both image/svg+xml and application/pdf plot formats in the notebook file
16 c.InlineBackend.figure_formats = {"png", "jpeg", "svg", "pdf"}
17
18 # https://github.com/jupyter/notebook/issues/3130
19 c.FileContentsManager.delete_to_trash = False
20
21 # Generate a self-signed certificate
22 OPENSSL_CONFIG = """\
23 [req]
24 distinguished_name = req_distinguished_name
25 [req_distinguished_name]
26 """
27 if "GEN_CERT" in os.environ:
28 dir_name = jupyter_data_dir()
29 pem_file = os.path.join(dir_name, "notebook.pem")
30 os.makedirs(dir_name, exist_ok=True)
31
32 # Generate an openssl.cnf file to set the distinguished name
33 cnf_file = os.path.join(os.getenv("CONDA_DIR", "/usr/lib"), "ssl", "openssl.cnf")
34 if not os.path.isfile(cnf_file):
35 with open(cnf_file, "w") as fh:
36 fh.write(OPENSSL_CONFIG)
37
38 # Generate a certificate if one doesn't exist on disk
39 subprocess.check_call(
40 [
41 "openssl",
42 "req",
43 "-new",
44 "-newkey=rsa:2048",
45 "-days=365",
46 "-nodes",
47 "-x509",
48 "-subj=/C=XX/ST=XX/L=XX/O=generated/CN=generated",
49 f"-keyout={pem_file}",
50 f"-out={pem_file}",
51 ]
52 )
53 # Restrict access to the file
54 os.chmod(pem_file, stat.S_IRUSR | stat.S_IWUSR)
55 c.ServerApp.certfile = pem_file
56
57 # Change default umask for all subprocesses of the notebook server if set in
58 # the environment
59 if "NB_UMASK" in os.environ:
60 os.umask(int(os.environ["NB_UMASK"], 8))
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/base-notebook/jupyter_server_config.py b/base-notebook/jupyter_server_config.py
--- a/base-notebook/jupyter_server_config.py
+++ b/base-notebook/jupyter_server_config.py
@@ -9,7 +9,6 @@
c = get_config() # noqa: F821
c.ServerApp.ip = "0.0.0.0"
-c.ServerApp.port = 8888
c.ServerApp.open_browser = False
# to output both image/svg+xml and application/pdf plot formats in the notebook file
| {"golden_diff": "diff --git a/base-notebook/jupyter_server_config.py b/base-notebook/jupyter_server_config.py\n--- a/base-notebook/jupyter_server_config.py\n+++ b/base-notebook/jupyter_server_config.py\n@@ -9,7 +9,6 @@\n \n c = get_config() # noqa: F821\n c.ServerApp.ip = \"0.0.0.0\"\n-c.ServerApp.port = 8888\n c.ServerApp.open_browser = False\n \n # to output both image/svg+xml and application/pdf plot formats in the notebook file\n", "issue": "[BUG] - Health Check fails if you change the port jupyter runs on\n### What docker image(s) are you using?\r\n\r\nminimal-notebook\r\n\r\n### OS system and architecture running docker image\r\n\r\nRHEL7 docker swarm\r\n\r\n### What Docker command are you running?\r\n\r\nNot really relevant, but I need to run it in a docker swarm, with a generalise 'ingress service'.\r\n\r\nFor this I needed to change internal port jupyter runs on needs to be changes for intergation into a 'ingress proxy'\r\nTo change the port I made a slight modification the docker image to set the internal port it runs on (see below)\r\n\r\nThe problem is the docker container dies unexpectedly after running for 46 seconds.\r\nDuring that time the service is visible within the conatiner, but not external to the container.\r\nThis is because the built-in heathcheck never succeeds, and eventually kills the container with little logged reporting.\r\n(see below)\r\n\r\n\r\n### How to Reproduce the problem?\r\n\r\nDockerfile, to set port\r\n\r\n```dockerfile\r\nFROM \"jupyter/minimal-notebook:latest\"\r\n# Update Jupyter configuration to set port\r\nRUN set -eux; \\\r\n sed -i 's/port = 8888/port = 8080/' /etc/jupyter/jupyter_notebook_config.py ;\\\r\n sed -i 's/port = 8888/port = 8080/' /etc/jupyter/jupyter_server_config.py ;\\\r\n :;\r\n```\r\nYou can also change the port in other ways such as... 
\r\n\r\nCreating a `~joyvan/.jupyter/jupyter_server_config.py` file (which can also set a password)\r\n\r\nor setting a JUPYTER_PORT environment variable (IF the setting in `/etc/jupyter` configs are removed)\r\n\r\n\r\n\r\n### Command output\r\n\r\nWhen you build and then run the modified docker image, `docker ps` reports\r\n`Up 9 seconds (health: starting) 8888/tcp` despite the fact that jupyter is now running on port 8080\r\n\r\n46 seconds after starting the container dies with a unhelpful (Signal 15)\r\n\r\nLog output...\r\n```\r\n[I 2022-10-28 05:20:00.393 ServerApp] Jupyter Server 1.21.0 is running at:\r\n[I 2022-10-28 05:20:00.393 ServerApp] http://jupyter_service:8080/lab?token=929eaaf2e60f8a947761cb1f2741d745fd46dde62f6fef7c\r\nor http://127.0.0.1:8080/lab?token=929eaaf2e60f8a947761cb1f2741d745fd46dde62f6fef7c\r\n[I 2022-10-28 05:20:00.393 ServerApp] Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).\r\n[C 2022-10-28 05:20:00.397 ServerApp] \r\n\r\nTo access the server, open this file in a browser:\r\nfile:///home/jovyan/.local/share/jupyter/runtime/jpserver-8-open.html\r\nOr copy and paste one of these URLs:\r\nhttp://jupyter_service:8080/lab?token=929eaaf2e60f8a947761cb1f2741d745fd46dde62f6fef7c\r\nor http://127.0.0.1:8080/lab?token=929eaaf2e60f8a947761cb1f2741d745fd46dde62f6fef7c\r\nEntered start.sh with args: jupyter lab\r\nExecuting the command: jupyter lab\r\n[C 2022-10-28 05:20:46.261 ServerApp] received signal 15, stopping\r\n[I 2022-10-28 05:20:46.262 ServerApp] Shutting down 2 extensions\r\n[I 2022-10-28 05:20:46.263 ServerApp] Shutting down 0 terminals\r\n```\r\n\r\n### Expected behavior\r\n\r\nChanging the internal port should not take days of work to track down, it should be straight forward and documented.\r\n\r\nThe healthcheck should also be properly documented in jupyter-stacks documentation.\r\nThis will make it more 'swarm friendly' as well as allow others to integrate it better when port 8888 is NOT available.\r\n\r\nYes you can map the port when doing a 'docker run', but that is NOT always possible.\r\n\r\n### Actual behavior\r\n\r\nInternal Port changing is undocumented in stacks\r\n\r\nHeathcheck kills the container without notice (signal 15 hardly makes it clear) when port is different.\r\n\r\nDays of work lost trying to figure out what should be a straight forward and simple task.\r\n\r\n### Anything else?\r\n\r\nThere is an existing environment variable \"JUPYTER_PORT\" that defines the default port.\r\nBut any such setting is currently overridden by the configuration files in `/etc/jupyter`\r\n\r\nThis may be usable to set healthcheck, especially if the config file default is removed, or allows the env var to override.\r\n\r\nin Dockerfile....\r\n```\r\nHEALTHCHECK --interval=15s --timeout=3s --start-period=5s --retries=3 \\\r\n CMD wget -O- --no-verbose --tries=1 --no-check-certificate \\\r\n http${GEN_CERT:+s}://localhost:${JUPYTER_PORT:-8888}${JUPYTERHUB_SERVICE_PREFIX:-/}api || exit 1\r\n```\r\n\r\nThat Environment variable also needs to be documented in the jupyter-stacks documentation, with the health check.\n[BUG] - Health Check fails if you change the port jupyter runs on\n### What docker image(s) are you using?\r\n\r\nminimal-notebook\r\n\r\n### OS system and architecture running docker image\r\n\r\nRHEL7 docker swarm\r\n\r\n### What Docker command are you running?\r\n\r\nNot really relevant, but I need to run it in a docker swarm, with a generalise 'ingress service'.\r\n\r\nFor this I needed to change 
internal port jupyter runs on needs to be changes for intergation into a 'ingress proxy'\r\nTo change the port I made a slight modification the docker image to set the internal port it runs on (see below)\r\n\r\nThe problem is the docker container dies unexpectedly after running for 46 seconds.\r\nDuring that time the service is visible within the conatiner, but not external to the container.\r\nThis is because the built-in heathcheck never succeeds, and eventually kills the container with little logged reporting.\r\n(see below)\r\n\r\n\r\n### How to Reproduce the problem?\r\n\r\nDockerfile, to set port\r\n\r\n```dockerfile\r\nFROM \"jupyter/minimal-notebook:latest\"\r\n# Update Jupyter configuration to set port\r\nRUN set -eux; \\\r\n sed -i 's/port = 8888/port = 8080/' /etc/jupyter/jupyter_notebook_config.py ;\\\r\n sed -i 's/port = 8888/port = 8080/' /etc/jupyter/jupyter_server_config.py ;\\\r\n :;\r\n```\r\nYou can also change the port in other ways such as... \r\n\r\nCreating a `~joyvan/.jupyter/jupyter_server_config.py` file (which can also set a password)\r\n\r\nor setting a JUPYTER_PORT environment variable (IF the setting in `/etc/jupyter` configs are removed)\r\n\r\n\r\n\r\n### Command output\r\n\r\nWhen you build and then run the modified docker image, `docker ps` reports\r\n`Up 9 seconds (health: starting) 8888/tcp` despite the fact that jupyter is now running on port 8080\r\n\r\n46 seconds after starting the container dies with a unhelpful (Signal 15)\r\n\r\nLog output...\r\n```\r\n[I 2022-10-28 05:20:00.393 ServerApp] Jupyter Server 1.21.0 is running at:\r\n[I 2022-10-28 05:20:00.393 ServerApp] http://jupyter_service:8080/lab?token=929eaaf2e60f8a947761cb1f2741d745fd46dde62f6fef7c\r\nor http://127.0.0.1:8080/lab?token=929eaaf2e60f8a947761cb1f2741d745fd46dde62f6fef7c\r\n[I 2022-10-28 05:20:00.393 ServerApp] Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).\r\n[C 2022-10-28 05:20:00.397 ServerApp] \r\n\r\nTo access the server, open this file in a browser:\r\nfile:///home/jovyan/.local/share/jupyter/runtime/jpserver-8-open.html\r\nOr copy and paste one of these URLs:\r\nhttp://jupyter_service:8080/lab?token=929eaaf2e60f8a947761cb1f2741d745fd46dde62f6fef7c\r\nor http://127.0.0.1:8080/lab?token=929eaaf2e60f8a947761cb1f2741d745fd46dde62f6fef7c\r\nEntered start.sh with args: jupyter lab\r\nExecuting the command: jupyter lab\r\n[C 2022-10-28 05:20:46.261 ServerApp] received signal 15, stopping\r\n[I 2022-10-28 05:20:46.262 ServerApp] Shutting down 2 extensions\r\n[I 2022-10-28 05:20:46.263 ServerApp] Shutting down 0 terminals\r\n```\r\n\r\n### Expected behavior\r\n\r\nChanging the internal port should not take days of work to track down, it should be straight forward and documented.\r\n\r\nThe healthcheck should also be properly documented in jupyter-stacks documentation.\r\nThis will make it more 'swarm friendly' as well as allow others to integrate it better when port 8888 is NOT available.\r\n\r\nYes you can map the port when doing a 'docker run', but that is NOT always possible.\r\n\r\n### Actual behavior\r\n\r\nInternal Port changing is undocumented in stacks\r\n\r\nHeathcheck kills the container without notice (signal 15 hardly makes it clear) when port is different.\r\n\r\nDays of work lost trying to figure out what should be a straight forward and simple task.\r\n\r\n### Anything else?\r\n\r\nThere is an existing environment variable \"JUPYTER_PORT\" that defines the default port.\r\nBut any such setting is currently overridden by the 
configuration files in `/etc/jupyter`\r\n\r\nThis may be usable to set healthcheck, especially if the config file default is removed, or allows the env var to override.\r\n\r\nin Dockerfile....\r\n```\r\nHEALTHCHECK --interval=15s --timeout=3s --start-period=5s --retries=3 \\\r\n CMD wget -O- --no-verbose --tries=1 --no-check-certificate \\\r\n http${GEN_CERT:+s}://localhost:${JUPYTER_PORT:-8888}${JUPYTERHUB_SERVICE_PREFIX:-/}api || exit 1\r\n```\r\n\r\nThat Environment variable also needs to be documented in the jupyter-stacks documentation, with the health check.\n", "before_files": [{"content": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n# mypy: ignore-errors\nimport os\nimport stat\nimport subprocess\n\nfrom jupyter_core.paths import jupyter_data_dir\n\nc = get_config() # noqa: F821\nc.ServerApp.ip = \"0.0.0.0\"\nc.ServerApp.port = 8888\nc.ServerApp.open_browser = False\n\n# to output both image/svg+xml and application/pdf plot formats in the notebook file\nc.InlineBackend.figure_formats = {\"png\", \"jpeg\", \"svg\", \"pdf\"}\n\n# https://github.com/jupyter/notebook/issues/3130\nc.FileContentsManager.delete_to_trash = False\n\n# Generate a self-signed certificate\nOPENSSL_CONFIG = \"\"\"\\\n[req]\ndistinguished_name = req_distinguished_name\n[req_distinguished_name]\n\"\"\"\nif \"GEN_CERT\" in os.environ:\n dir_name = jupyter_data_dir()\n pem_file = os.path.join(dir_name, \"notebook.pem\")\n os.makedirs(dir_name, exist_ok=True)\n\n # Generate an openssl.cnf file to set the distinguished name\n cnf_file = os.path.join(os.getenv(\"CONDA_DIR\", \"/usr/lib\"), \"ssl\", \"openssl.cnf\")\n if not os.path.isfile(cnf_file):\n with open(cnf_file, \"w\") as fh:\n fh.write(OPENSSL_CONFIG)\n\n # Generate a certificate if one doesn't exist on disk\n subprocess.check_call(\n [\n \"openssl\",\n \"req\",\n \"-new\",\n \"-newkey=rsa:2048\",\n \"-days=365\",\n \"-nodes\",\n \"-x509\",\n \"-subj=/C=XX/ST=XX/L=XX/O=generated/CN=generated\",\n f\"-keyout={pem_file}\",\n f\"-out={pem_file}\",\n ]\n )\n # Restrict access to the file\n os.chmod(pem_file, stat.S_IRUSR | stat.S_IWUSR)\n c.ServerApp.certfile = pem_file\n\n# Change default umask for all subprocesses of the notebook server if set in\n# the environment\nif \"NB_UMASK\" in os.environ:\n os.umask(int(os.environ[\"NB_UMASK\"], 8))\n", "path": "base-notebook/jupyter_server_config.py"}], "after_files": [{"content": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n# mypy: ignore-errors\nimport os\nimport stat\nimport subprocess\n\nfrom jupyter_core.paths import jupyter_data_dir\n\nc = get_config() # noqa: F821\nc.ServerApp.ip = \"0.0.0.0\"\nc.ServerApp.open_browser = False\n\n# to output both image/svg+xml and application/pdf plot formats in the notebook file\nc.InlineBackend.figure_formats = {\"png\", \"jpeg\", \"svg\", \"pdf\"}\n\n# https://github.com/jupyter/notebook/issues/3130\nc.FileContentsManager.delete_to_trash = False\n\n# Generate a self-signed certificate\nOPENSSL_CONFIG = \"\"\"\\\n[req]\ndistinguished_name = req_distinguished_name\n[req_distinguished_name]\n\"\"\"\nif \"GEN_CERT\" in os.environ:\n dir_name = jupyter_data_dir()\n pem_file = os.path.join(dir_name, \"notebook.pem\")\n os.makedirs(dir_name, exist_ok=True)\n\n # Generate an openssl.cnf file to set the distinguished name\n cnf_file = os.path.join(os.getenv(\"CONDA_DIR\", \"/usr/lib\"), \"ssl\", \"openssl.cnf\")\n if not os.path.isfile(cnf_file):\n with 
open(cnf_file, \"w\") as fh:\n fh.write(OPENSSL_CONFIG)\n\n # Generate a certificate if one doesn't exist on disk\n subprocess.check_call(\n [\n \"openssl\",\n \"req\",\n \"-new\",\n \"-newkey=rsa:2048\",\n \"-days=365\",\n \"-nodes\",\n \"-x509\",\n \"-subj=/C=XX/ST=XX/L=XX/O=generated/CN=generated\",\n f\"-keyout={pem_file}\",\n f\"-out={pem_file}\",\n ]\n )\n # Restrict access to the file\n os.chmod(pem_file, stat.S_IRUSR | stat.S_IWUSR)\n c.ServerApp.certfile = pem_file\n\n# Change default umask for all subprocesses of the notebook server if set in\n# the environment\nif \"NB_UMASK\" in os.environ:\n os.umask(int(os.environ[\"NB_UMASK\"], 8))\n", "path": "base-notebook/jupyter_server_config.py"}]} | 3,553 | 119 |
gh_patches_debug_3043 | rasdani/github-patches | git_diff | docker__docker-py-1250 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
attach is causing an "Invalid Argument" exception from os.read
``` python
stream = client.attach(container, stream=True, stdout=True, stderr=True)
for chunk in stream:
pass
```
Results in:
```
File "/Users/michael/work/oss/marina/marina/build.py", line 695, in watcher
for chunk in stream:
File ".venv/lib/python3.5/site-packages/docker/utils/socket.py", line 67, in frames_iter
yield read(socket, n)
File ".venv/lib/python3.5/site-packages/docker/utils/socket.py", line 25, in read
return os.read(socket.fileno(), n)
OSError: [Errno 22] Invalid argument
```
Using docker-py 1.10.2 on OS X 10.11.6 with docker for mac 1.12.0-rc3. Reverting to 1.9.0 fixes the issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/utils/socket.py`
Content:
```
1 import errno
2 import os
3 import select
4 import struct
5
6 import six
7
8 try:
9 from ..transport import NpipeSocket
10 except ImportError:
11 NpipeSocket = type(None)
12
13
14 class SocketError(Exception):
15 pass
16
17
18 def read(socket, n=4096):
19 """
20 Reads at most n bytes from socket
21 """
22
23 recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
24
25 # wait for data to become available
26 if not isinstance(socket, NpipeSocket):
27 select.select([socket], [], [])
28
29 try:
30 if hasattr(socket, 'recv'):
31 return socket.recv(n)
32 return os.read(socket.fileno(), n)
33 except EnvironmentError as e:
34 if e.errno not in recoverable_errors:
35 raise
36
37
38 def read_exactly(socket, n):
39 """
40 Reads exactly n bytes from socket
41 Raises SocketError if there isn't enough data
42 """
43 data = six.binary_type()
44 while len(data) < n:
45 next_data = read(socket, n - len(data))
46 if not next_data:
47 raise SocketError("Unexpected EOF")
48 data += next_data
49 return data
50
51
52 def next_frame_size(socket):
53 """
54 Returns the size of the next frame of data waiting to be read from socket,
55 according to the protocol defined here:
56
57 https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container
58 """
59 try:
60 data = read_exactly(socket, 8)
61 except SocketError:
62 return 0
63
64 _, actual = struct.unpack('>BxxxL', data)
65 return actual
66
67
68 def frames_iter(socket):
69 """
70 Returns a generator of frames read from socket
71 """
72 n = next_frame_size(socket)
73 while n > 0:
74 yield read(socket, n)
75 n = next_frame_size(socket)
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docker/utils/socket.py b/docker/utils/socket.py
--- a/docker/utils/socket.py
+++ b/docker/utils/socket.py
@@ -69,7 +69,11 @@
"""
Returns a generator of frames read from socket
"""
- n = next_frame_size(socket)
- while n > 0:
- yield read(socket, n)
+ while True:
n = next_frame_size(socket)
+ if n == 0:
+ break
+ while n > 0:
+ result = read(socket, n)
+ n -= len(result)
+ yield result
| {"golden_diff": "diff --git a/docker/utils/socket.py b/docker/utils/socket.py\n--- a/docker/utils/socket.py\n+++ b/docker/utils/socket.py\n@@ -69,7 +69,11 @@\n \"\"\"\n Returns a generator of frames read from socket\n \"\"\"\n- n = next_frame_size(socket)\n- while n > 0:\n- yield read(socket, n)\n+ while True:\n n = next_frame_size(socket)\n+ if n == 0:\n+ break\n+ while n > 0:\n+ result = read(socket, n)\n+ n -= len(result)\n+ yield result\n", "issue": "attach is causing an \"Invalid Argument\" exception from os.read\n``` python\nstream = client.attach(container, stream=True, stdout=True, stderr=True)\nfor chunk in stream:\n pass\n```\n\nResults in:\n\n```\n File \"/Users/michael/work/oss/marina/marina/build.py\", line 695, in watcher\n for chunk in stream:\n File \".venv/lib/python3.5/site-packages/docker/utils/socket.py\", line 67, in frames_iter\n yield read(socket, n)\n File \".venv/lib/python3.5/site-packages/docker/utils/socket.py\", line 25, in read\n return os.read(socket.fileno(), n)\nOSError: [Errno 22] Invalid argument\n```\n\nUsing docker-py 1.10.2 on OS X 10.11.6 with docker for mac 1.12.0-rc3. Reverting to 1.9.0 fixes the issue.\n\n", "before_files": [{"content": "import errno\nimport os\nimport select\nimport struct\n\nimport six\n\ntry:\n from ..transport import NpipeSocket\nexcept ImportError:\n NpipeSocket = type(None)\n\n\nclass SocketError(Exception):\n pass\n\n\ndef read(socket, n=4096):\n \"\"\"\n Reads at most n bytes from socket\n \"\"\"\n\n recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)\n\n # wait for data to become available\n if not isinstance(socket, NpipeSocket):\n select.select([socket], [], [])\n\n try:\n if hasattr(socket, 'recv'):\n return socket.recv(n)\n return os.read(socket.fileno(), n)\n except EnvironmentError as e:\n if e.errno not in recoverable_errors:\n raise\n\n\ndef read_exactly(socket, n):\n \"\"\"\n Reads exactly n bytes from socket\n Raises SocketError if there isn't enough data\n \"\"\"\n data = six.binary_type()\n while len(data) < n:\n next_data = read(socket, n - len(data))\n if not next_data:\n raise SocketError(\"Unexpected EOF\")\n data += next_data\n return data\n\n\ndef next_frame_size(socket):\n \"\"\"\n Returns the size of the next frame of data waiting to be read from socket,\n according to the protocol defined here:\n\n https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container\n \"\"\"\n try:\n data = read_exactly(socket, 8)\n except SocketError:\n return 0\n\n _, actual = struct.unpack('>BxxxL', data)\n return actual\n\n\ndef frames_iter(socket):\n \"\"\"\n Returns a generator of frames read from socket\n \"\"\"\n n = next_frame_size(socket)\n while n > 0:\n yield read(socket, n)\n n = next_frame_size(socket)\n", "path": "docker/utils/socket.py"}], "after_files": [{"content": "import errno\nimport os\nimport select\nimport struct\n\nimport six\n\ntry:\n from ..transport import NpipeSocket\nexcept ImportError:\n NpipeSocket = type(None)\n\n\nclass SocketError(Exception):\n pass\n\n\ndef read(socket, n=4096):\n \"\"\"\n Reads at most n bytes from socket\n \"\"\"\n\n recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)\n\n # wait for data to become available\n if not isinstance(socket, NpipeSocket):\n select.select([socket], [], [])\n\n try:\n if hasattr(socket, 'recv'):\n return socket.recv(n)\n return os.read(socket.fileno(), n)\n except EnvironmentError as e:\n if e.errno not in recoverable_errors:\n raise\n\n\ndef read_exactly(socket, n):\n \"\"\"\n 
Reads exactly n bytes from socket\n Raises SocketError if there isn't enough data\n \"\"\"\n data = six.binary_type()\n while len(data) < n:\n next_data = read(socket, n - len(data))\n if not next_data:\n raise SocketError(\"Unexpected EOF\")\n data += next_data\n return data\n\n\ndef next_frame_size(socket):\n \"\"\"\n Returns the size of the next frame of data waiting to be read from socket,\n according to the protocol defined here:\n\n https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container\n \"\"\"\n try:\n data = read_exactly(socket, 8)\n except SocketError:\n return 0\n\n _, actual = struct.unpack('>BxxxL', data)\n return actual\n\n\ndef frames_iter(socket):\n \"\"\"\n Returns a generator of frames read from socket\n \"\"\"\n while True:\n n = next_frame_size(socket)\n if n == 0:\n break\n while n > 0:\n result = read(socket, n)\n n -= len(result)\n yield result\n", "path": "docker/utils/socket.py"}]} | 1,028 | 134 |
gh_patches_debug_23435 | rasdani/github-patches | git_diff | inventree__InvenTree-1934 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Heartbeat has a datetime warning
From the worker logs:
```
inventree-worker | 00:51:15 [Q] INFO Process-1 created a task from schedule [InvenTree.tasks.heartbeat]
inventree-worker | 00:51:15 [Q] INFO Process-1:3 processing [may-cat-eleven-beer]
inventree-worker | Could not perform heartbeat task - App registry not ready
inventree-worker | /usr/lib/python3.8/site-packages/django/db/models/fields/__init__.py:1416: RuntimeWarning: DateTimeField Task.started received a naive datetime (2021-08-10 00:21:15.816018) while time zone support is active.
inventree-worker | warnings.warn("DateTimeField %s received a naive datetime (%s)"
```
The heartbeat task builds its timestamp from a naive (non-timezone-aware) datetime, which Django complains about because USE_TZ is True in settings.py.
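
As a minimal illustration of the difference (a sketch, assuming a Django project with `USE_TZ = True`):

```python
from datetime import datetime, timedelta

from django.utils import timezone

# Naive timestamp: saving this into a DateTimeField triggers the RuntimeWarning above
naive_threshold = datetime.now() - timedelta(minutes=30)

# Timezone-aware timestamp: no warning when USE_TZ is True
aware_threshold = timezone.now() - timedelta(minutes=30)
```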
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `InvenTree/InvenTree/tasks.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals
3
4 import re
5 import json
6 import requests
7 import logging
8
9 from datetime import datetime, timedelta
10
11 from django.core.exceptions import AppRegistryNotReady
12 from django.db.utils import OperationalError, ProgrammingError
13
14
15 logger = logging.getLogger("inventree")
16
17
18 def schedule_task(taskname, **kwargs):
19 """
20 Create a scheduled task.
21 If the task has already been scheduled, ignore!
22 """
23
24 # If unspecified, repeat indefinitely
25 repeats = kwargs.pop('repeats', -1)
26 kwargs['repeats'] = repeats
27
28 try:
29 from django_q.models import Schedule
30 except (AppRegistryNotReady):
31 logger.info("Could not start background tasks - App registry not ready")
32 return
33
34 try:
35 # If this task is already scheduled, don't schedule it again
36 # Instead, update the scheduling parameters
37 if Schedule.objects.filter(func=taskname).exists():
38 logger.info(f"Scheduled task '{taskname}' already exists - updating!")
39
40 Schedule.objects.filter(func=taskname).update(**kwargs)
41 else:
42 logger.info(f"Creating scheduled task '{taskname}'")
43
44 Schedule.objects.create(
45 name=taskname,
46 func=taskname,
47 **kwargs
48 )
49 except (OperationalError, ProgrammingError):
50 # Required if the DB is not ready yet
51 pass
52
53
54 def offload_task(taskname, force_sync=False, *args, **kwargs):
55 """
56 Create an AsyncTask if workers are running.
57 This is different to a 'scheduled' task,
58 in that it only runs once!
59
60 If workers are not running or force_sync flag
61 is set then the task is ran synchronously.
62 """
63
64 try:
65 from django_q.tasks import AsyncTask
66 except (AppRegistryNotReady):
67 logger.warning("Could not offload task - app registry not ready")
68 return
69 import importlib
70 from InvenTree.status import is_worker_running
71
72 if is_worker_running() and not force_sync:
73 # Running as asynchronous task
74 try:
75 task = AsyncTask(taskname, *args, **kwargs)
76 task.run()
77 except ImportError:
78 logger.warning(f"WARNING: '{taskname}' not started - Function not found")
79 else:
80 # Split path
81 try:
82 app, mod, func = taskname.split('.')
83 app_mod = app + '.' + mod
84 except ValueError:
85 logger.warning(f"WARNING: '{taskname}' not started - Malformed function path")
86 return
87
88 # Import module from app
89 try:
90 _mod = importlib.import_module(app_mod)
91 except ModuleNotFoundError:
92 logger.warning(f"WARNING: '{taskname}' not started - No module named '{app_mod}'")
93 return
94
95 # Retrieve function
96 try:
97 _func = getattr(_mod, func)
98 except AttributeError:
99 # getattr does not work for local import
100 _func = None
101
102 try:
103 if not _func:
104 _func = eval(func)
105 except NameError:
106 logger.warning(f"WARNING: '{taskname}' not started - No function named '{func}'")
107 return
108
109 # Workers are not running: run it as synchronous task
110 _func()
111
112
113 def heartbeat():
114 """
115 Simple task which runs at 5 minute intervals,
116 so we can determine that the background worker
117 is actually running.
118
119 (There is probably a less "hacky" way of achieving this)?
120 """
121
122 try:
123 from django_q.models import Success
124 logger.info("Could not perform heartbeat task - App registry not ready")
125 except AppRegistryNotReady:
126 return
127
128 threshold = datetime.now() - timedelta(minutes=30)
129
130 # Delete heartbeat results more than half an hour old,
131 # otherwise they just create extra noise
132 heartbeats = Success.objects.filter(
133 func='InvenTree.tasks.heartbeat',
134 started__lte=threshold
135 )
136
137 heartbeats.delete()
138
139
140 def delete_successful_tasks():
141 """
142 Delete successful task logs
143 which are more than a month old.
144 """
145
146 try:
147 from django_q.models import Success
148 except AppRegistryNotReady:
149 logger.info("Could not perform 'delete_successful_tasks' - App registry not ready")
150 return
151
152 threshold = datetime.now() - timedelta(days=30)
153
154 results = Success.objects.filter(
155 started__lte=threshold
156 )
157
158 results.delete()
159
160
161 def check_for_updates():
162 """
163 Check if there is an update for InvenTree
164 """
165
166 try:
167 import common.models
168 except AppRegistryNotReady:
169 # Apps not yet loaded!
170 logger.info("Could not perform 'check_for_updates' - App registry not ready")
171 return
172
173 response = requests.get('https://api.github.com/repos/inventree/inventree/releases/latest')
174
175 if not response.status_code == 200:
176 raise ValueError(f'Unexpected status code from GitHub API: {response.status_code}')
177
178 data = json.loads(response.text)
179
180 tag = data.get('tag_name', None)
181
182 if not tag:
183 raise ValueError("'tag_name' missing from GitHub response")
184
185 match = re.match(r"^.*(\d+)\.(\d+)\.(\d+).*$", tag)
186
187 if not len(match.groups()) == 3:
188 logger.warning(f"Version '{tag}' did not match expected pattern")
189 return
190
191 latest_version = [int(x) for x in match.groups()]
192
193 if not len(latest_version) == 3:
194 raise ValueError(f"Version '{tag}' is not correct format")
195
196 logger.info(f"Latest InvenTree version: '{tag}'")
197
198 # Save the version to the database
199 common.models.InvenTreeSetting.set_setting(
200 'INVENTREE_LATEST_VERSION',
201 tag,
202 None
203 )
204
205
206 def update_exchange_rates():
207 """
208 Update currency exchange rates
209 """
210
211 try:
212 from InvenTree.exchange import InvenTreeExchange
213 from djmoney.contrib.exchange.models import ExchangeBackend, Rate
214 from common.settings import currency_code_default, currency_codes
215 except AppRegistryNotReady:
216 # Apps not yet loaded!
217 logger.info("Could not perform 'update_exchange_rates' - App registry not ready")
218 return
219 except:
220 # Other error?
221 return
222
223 # Test to see if the database is ready yet
224 try:
225 backend = ExchangeBackend.objects.get(name='InvenTreeExchange')
226 except ExchangeBackend.DoesNotExist:
227 pass
228 except:
229 # Some other error
230 print("Database not ready")
231 return
232
233 backend = InvenTreeExchange()
234 print(f"Updating exchange rates from {backend.url}")
235
236 base = currency_code_default()
237
238 print(f"Using base currency '{base}'")
239
240 backend.update_rates(base_currency=base)
241
242 # Remove any exchange rates which are not in the provided currencies
243 Rate.objects.filter(backend="InvenTreeExchange").exclude(currency__in=currency_codes()).delete()
244
245
246 def send_email(subject, body, recipients, from_email=None):
247 """
248 Send an email with the specified subject and body,
249 to the specified recipients list.
250 """
251
252 if type(recipients) == str:
253 recipients = [recipients]
254
255 offload_task(
256 'django.core.mail.send_mail',
257 subject, body,
258 from_email,
259 recipients,
260 )
261
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/InvenTree/InvenTree/tasks.py b/InvenTree/InvenTree/tasks.py
--- a/InvenTree/InvenTree/tasks.py
+++ b/InvenTree/InvenTree/tasks.py
@@ -6,7 +6,8 @@
import requests
import logging
-from datetime import datetime, timedelta
+from datetime import timedelta
+from django.utils import timezone
from django.core.exceptions import AppRegistryNotReady
from django.db.utils import OperationalError, ProgrammingError
@@ -125,7 +126,7 @@
except AppRegistryNotReady:
return
- threshold = datetime.now() - timedelta(minutes=30)
+ threshold = timezone.now() - timedelta(minutes=30)
# Delete heartbeat results more than half an hour old,
# otherwise they just create extra noise
@@ -149,7 +150,7 @@
logger.info("Could not perform 'delete_successful_tasks' - App registry not ready")
return
- threshold = datetime.now() - timedelta(days=30)
+ threshold = timezone.now() - timedelta(days=30)
results = Success.objects.filter(
started__lte=threshold
| {"golden_diff": "diff --git a/InvenTree/InvenTree/tasks.py b/InvenTree/InvenTree/tasks.py\n--- a/InvenTree/InvenTree/tasks.py\n+++ b/InvenTree/InvenTree/tasks.py\n@@ -6,7 +6,8 @@\n import requests\n import logging\n \n-from datetime import datetime, timedelta\n+from datetime import timedelta\n+from django.utils import timezone\n \n from django.core.exceptions import AppRegistryNotReady\n from django.db.utils import OperationalError, ProgrammingError\n@@ -125,7 +126,7 @@\n except AppRegistryNotReady:\n return\n \n- threshold = datetime.now() - timedelta(minutes=30)\n+ threshold = timezone.now() - timedelta(minutes=30)\n \n # Delete heartbeat results more than half an hour old,\n # otherwise they just create extra noise\n@@ -149,7 +150,7 @@\n logger.info(\"Could not perform 'delete_successful_tasks' - App registry not ready\")\n return\n \n- threshold = datetime.now() - timedelta(days=30)\n+ threshold = timezone.now() - timedelta(days=30)\n \n results = Success.objects.filter(\n started__lte=threshold\n", "issue": "Heartbeat has a datetime warning\nFrom the worker logs:\r\n`inventree-worker | 00:51:15 [Q] INFO Process-1 created a task from schedule [InvenTree.tasks.heartbeat]\r\ninventree-worker | 00:51:15 [Q] INFO Process-1:3 processing [may-cat-eleven-beer]\r\ninventree-worker | Could not perform heartbeat task - App registry not ready\r\ninventree-worker | /usr/lib/python3.8/site-packages/django/db/models/fields/__init__.py:1416: RuntimeWarning: DateTimeField Task.started received a naive datetime (2021-08-10 00:21:15.816018) while time zone support is active.\r\ninventree-worker | warnings.warn(\"DateTimeField %s received a naive datetime (%s)\"\r\n`\r\n\r\nThe heartbeat task references a non timezoned date which django complains about as USE_TZ is True in the settings.py\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport re\nimport json\nimport requests\nimport logging\n\nfrom datetime import datetime, timedelta\n\nfrom django.core.exceptions import AppRegistryNotReady\nfrom django.db.utils import OperationalError, ProgrammingError\n\n\nlogger = logging.getLogger(\"inventree\")\n\n\ndef schedule_task(taskname, **kwargs):\n \"\"\"\n Create a scheduled task.\n If the task has already been scheduled, ignore!\n \"\"\"\n\n # If unspecified, repeat indefinitely\n repeats = kwargs.pop('repeats', -1)\n kwargs['repeats'] = repeats\n\n try:\n from django_q.models import Schedule\n except (AppRegistryNotReady):\n logger.info(\"Could not start background tasks - App registry not ready\")\n return\n\n try:\n # If this task is already scheduled, don't schedule it again\n # Instead, update the scheduling parameters\n if Schedule.objects.filter(func=taskname).exists():\n logger.info(f\"Scheduled task '{taskname}' already exists - updating!\")\n\n Schedule.objects.filter(func=taskname).update(**kwargs)\n else:\n logger.info(f\"Creating scheduled task '{taskname}'\")\n\n Schedule.objects.create(\n name=taskname,\n func=taskname,\n **kwargs\n )\n except (OperationalError, ProgrammingError):\n # Required if the DB is not ready yet\n pass\n\n\ndef offload_task(taskname, force_sync=False, *args, **kwargs):\n \"\"\"\n Create an AsyncTask if workers are running.\n This is different to a 'scheduled' task,\n in that it only runs once!\n\n If workers are not running or force_sync flag\n is set then the task is ran synchronously.\n \"\"\"\n\n try:\n from django_q.tasks import AsyncTask\n except (AppRegistryNotReady):\n logger.warning(\"Could not 
offload task - app registry not ready\")\n return\n import importlib\n from InvenTree.status import is_worker_running\n\n if is_worker_running() and not force_sync:\n # Running as asynchronous task\n try:\n task = AsyncTask(taskname, *args, **kwargs)\n task.run()\n except ImportError:\n logger.warning(f\"WARNING: '{taskname}' not started - Function not found\")\n else:\n # Split path\n try:\n app, mod, func = taskname.split('.')\n app_mod = app + '.' + mod\n except ValueError:\n logger.warning(f\"WARNING: '{taskname}' not started - Malformed function path\")\n return\n\n # Import module from app\n try:\n _mod = importlib.import_module(app_mod)\n except ModuleNotFoundError:\n logger.warning(f\"WARNING: '{taskname}' not started - No module named '{app_mod}'\")\n return\n\n # Retrieve function\n try:\n _func = getattr(_mod, func)\n except AttributeError:\n # getattr does not work for local import\n _func = None\n\n try:\n if not _func:\n _func = eval(func)\n except NameError:\n logger.warning(f\"WARNING: '{taskname}' not started - No function named '{func}'\")\n return\n \n # Workers are not running: run it as synchronous task\n _func()\n\n\ndef heartbeat():\n \"\"\"\n Simple task which runs at 5 minute intervals,\n so we can determine that the background worker\n is actually running.\n\n (There is probably a less \"hacky\" way of achieving this)?\n \"\"\"\n\n try:\n from django_q.models import Success\n logger.info(\"Could not perform heartbeat task - App registry not ready\")\n except AppRegistryNotReady:\n return\n\n threshold = datetime.now() - timedelta(minutes=30)\n\n # Delete heartbeat results more than half an hour old,\n # otherwise they just create extra noise\n heartbeats = Success.objects.filter(\n func='InvenTree.tasks.heartbeat',\n started__lte=threshold\n )\n\n heartbeats.delete()\n\n\ndef delete_successful_tasks():\n \"\"\"\n Delete successful task logs\n which are more than a month old.\n \"\"\"\n\n try:\n from django_q.models import Success\n except AppRegistryNotReady:\n logger.info(\"Could not perform 'delete_successful_tasks' - App registry not ready\")\n return\n\n threshold = datetime.now() - timedelta(days=30)\n\n results = Success.objects.filter(\n started__lte=threshold\n )\n\n results.delete()\n\n\ndef check_for_updates():\n \"\"\"\n Check if there is an update for InvenTree\n \"\"\"\n\n try:\n import common.models\n except AppRegistryNotReady:\n # Apps not yet loaded!\n logger.info(\"Could not perform 'check_for_updates' - App registry not ready\")\n return\n\n response = requests.get('https://api.github.com/repos/inventree/inventree/releases/latest')\n\n if not response.status_code == 200:\n raise ValueError(f'Unexpected status code from GitHub API: {response.status_code}')\n\n data = json.loads(response.text)\n\n tag = data.get('tag_name', None)\n\n if not tag:\n raise ValueError(\"'tag_name' missing from GitHub response\")\n\n match = re.match(r\"^.*(\\d+)\\.(\\d+)\\.(\\d+).*$\", tag)\n\n if not len(match.groups()) == 3:\n logger.warning(f\"Version '{tag}' did not match expected pattern\")\n return\n\n latest_version = [int(x) for x in match.groups()]\n\n if not len(latest_version) == 3:\n raise ValueError(f\"Version '{tag}' is not correct format\")\n\n logger.info(f\"Latest InvenTree version: '{tag}'\")\n\n # Save the version to the database\n common.models.InvenTreeSetting.set_setting(\n 'INVENTREE_LATEST_VERSION',\n tag,\n None\n )\n\n\ndef update_exchange_rates():\n \"\"\"\n Update currency exchange rates\n \"\"\"\n\n try:\n from InvenTree.exchange import 
InvenTreeExchange\n from djmoney.contrib.exchange.models import ExchangeBackend, Rate\n from common.settings import currency_code_default, currency_codes\n except AppRegistryNotReady:\n # Apps not yet loaded!\n logger.info(\"Could not perform 'update_exchange_rates' - App registry not ready\")\n return\n except:\n # Other error?\n return\n\n # Test to see if the database is ready yet\n try:\n backend = ExchangeBackend.objects.get(name='InvenTreeExchange')\n except ExchangeBackend.DoesNotExist:\n pass\n except:\n # Some other error\n print(\"Database not ready\")\n return\n\n backend = InvenTreeExchange()\n print(f\"Updating exchange rates from {backend.url}\")\n\n base = currency_code_default()\n\n print(f\"Using base currency '{base}'\")\n\n backend.update_rates(base_currency=base)\n\n # Remove any exchange rates which are not in the provided currencies\n Rate.objects.filter(backend=\"InvenTreeExchange\").exclude(currency__in=currency_codes()).delete()\n\n\ndef send_email(subject, body, recipients, from_email=None):\n \"\"\"\n Send an email with the specified subject and body,\n to the specified recipients list.\n \"\"\"\n\n if type(recipients) == str:\n recipients = [recipients]\n\n offload_task(\n 'django.core.mail.send_mail',\n subject, body,\n from_email,\n recipients,\n )\n", "path": "InvenTree/InvenTree/tasks.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport re\nimport json\nimport requests\nimport logging\n\nfrom datetime import timedelta\nfrom django.utils import timezone\n\nfrom django.core.exceptions import AppRegistryNotReady\nfrom django.db.utils import OperationalError, ProgrammingError\n\n\nlogger = logging.getLogger(\"inventree\")\n\n\ndef schedule_task(taskname, **kwargs):\n \"\"\"\n Create a scheduled task.\n If the task has already been scheduled, ignore!\n \"\"\"\n\n # If unspecified, repeat indefinitely\n repeats = kwargs.pop('repeats', -1)\n kwargs['repeats'] = repeats\n\n try:\n from django_q.models import Schedule\n except (AppRegistryNotReady):\n logger.info(\"Could not start background tasks - App registry not ready\")\n return\n\n try:\n # If this task is already scheduled, don't schedule it again\n # Instead, update the scheduling parameters\n if Schedule.objects.filter(func=taskname).exists():\n logger.info(f\"Scheduled task '{taskname}' already exists - updating!\")\n\n Schedule.objects.filter(func=taskname).update(**kwargs)\n else:\n logger.info(f\"Creating scheduled task '{taskname}'\")\n\n Schedule.objects.create(\n name=taskname,\n func=taskname,\n **kwargs\n )\n except (OperationalError, ProgrammingError):\n # Required if the DB is not ready yet\n pass\n\n\ndef offload_task(taskname, force_sync=False, *args, **kwargs):\n \"\"\"\n Create an AsyncTask if workers are running.\n This is different to a 'scheduled' task,\n in that it only runs once!\n\n If workers are not running or force_sync flag\n is set then the task is ran synchronously.\n \"\"\"\n\n try:\n from django_q.tasks import AsyncTask\n except (AppRegistryNotReady):\n logger.warning(\"Could not offload task - app registry not ready\")\n return\n import importlib\n from InvenTree.status import is_worker_running\n\n if is_worker_running() and not force_sync:\n # Running as asynchronous task\n try:\n task = AsyncTask(taskname, *args, **kwargs)\n task.run()\n except ImportError:\n logger.warning(f\"WARNING: '{taskname}' not started - Function not found\")\n else:\n # Split path\n try:\n app, mod, func = taskname.split('.')\n app_mod = app + 
'.' + mod\n except ValueError:\n logger.warning(f\"WARNING: '{taskname}' not started - Malformed function path\")\n return\n\n # Import module from app\n try:\n _mod = importlib.import_module(app_mod)\n except ModuleNotFoundError:\n logger.warning(f\"WARNING: '{taskname}' not started - No module named '{app_mod}'\")\n return\n\n # Retrieve function\n try:\n _func = getattr(_mod, func)\n except AttributeError:\n # getattr does not work for local import\n _func = None\n\n try:\n if not _func:\n _func = eval(func)\n except NameError:\n logger.warning(f\"WARNING: '{taskname}' not started - No function named '{func}'\")\n return\n \n # Workers are not running: run it as synchronous task\n _func()\n\n\ndef heartbeat():\n \"\"\"\n Simple task which runs at 5 minute intervals,\n so we can determine that the background worker\n is actually running.\n\n (There is probably a less \"hacky\" way of achieving this)?\n \"\"\"\n\n try:\n from django_q.models import Success\n logger.info(\"Could not perform heartbeat task - App registry not ready\")\n except AppRegistryNotReady:\n return\n\n threshold = timezone.now() - timedelta(minutes=30)\n\n # Delete heartbeat results more than half an hour old,\n # otherwise they just create extra noise\n heartbeats = Success.objects.filter(\n func='InvenTree.tasks.heartbeat',\n started__lte=threshold\n )\n\n heartbeats.delete()\n\n\ndef delete_successful_tasks():\n \"\"\"\n Delete successful task logs\n which are more than a month old.\n \"\"\"\n\n try:\n from django_q.models import Success\n except AppRegistryNotReady:\n logger.info(\"Could not perform 'delete_successful_tasks' - App registry not ready\")\n return\n\n threshold = timezone.now() - timedelta(days=30)\n\n results = Success.objects.filter(\n started__lte=threshold\n )\n\n results.delete()\n\n\ndef check_for_updates():\n \"\"\"\n Check if there is an update for InvenTree\n \"\"\"\n\n try:\n import common.models\n except AppRegistryNotReady:\n # Apps not yet loaded!\n logger.info(\"Could not perform 'check_for_updates' - App registry not ready\")\n return\n\n response = requests.get('https://api.github.com/repos/inventree/inventree/releases/latest')\n\n if not response.status_code == 200:\n raise ValueError(f'Unexpected status code from GitHub API: {response.status_code}')\n\n data = json.loads(response.text)\n\n tag = data.get('tag_name', None)\n\n if not tag:\n raise ValueError(\"'tag_name' missing from GitHub response\")\n\n match = re.match(r\"^.*(\\d+)\\.(\\d+)\\.(\\d+).*$\", tag)\n\n if not len(match.groups()) == 3:\n logger.warning(f\"Version '{tag}' did not match expected pattern\")\n return\n\n latest_version = [int(x) for x in match.groups()]\n\n if not len(latest_version) == 3:\n raise ValueError(f\"Version '{tag}' is not correct format\")\n\n logger.info(f\"Latest InvenTree version: '{tag}'\")\n\n # Save the version to the database\n common.models.InvenTreeSetting.set_setting(\n 'INVENTREE_LATEST_VERSION',\n tag,\n None\n )\n\n\ndef update_exchange_rates():\n \"\"\"\n Update currency exchange rates\n \"\"\"\n\n try:\n from InvenTree.exchange import InvenTreeExchange\n from djmoney.contrib.exchange.models import ExchangeBackend, Rate\n from common.settings import currency_code_default, currency_codes\n except AppRegistryNotReady:\n # Apps not yet loaded!\n logger.info(\"Could not perform 'update_exchange_rates' - App registry not ready\")\n return\n except:\n # Other error?\n return\n\n # Test to see if the database is ready yet\n try:\n backend = 
ExchangeBackend.objects.get(name='InvenTreeExchange')\n except ExchangeBackend.DoesNotExist:\n pass\n except:\n # Some other error\n print(\"Database not ready\")\n return\n\n backend = InvenTreeExchange()\n print(f\"Updating exchange rates from {backend.url}\")\n\n base = currency_code_default()\n\n print(f\"Using base currency '{base}'\")\n\n backend.update_rates(base_currency=base)\n\n # Remove any exchange rates which are not in the provided currencies\n Rate.objects.filter(backend=\"InvenTreeExchange\").exclude(currency__in=currency_codes()).delete()\n\n\ndef send_email(subject, body, recipients, from_email=None):\n \"\"\"\n Send an email with the specified subject and body,\n to the specified recipients list.\n \"\"\"\n\n if type(recipients) == str:\n recipients = [recipients]\n\n offload_task(\n 'django.core.mail.send_mail',\n subject, body,\n from_email,\n recipients,\n )\n", "path": "InvenTree/InvenTree/tasks.py"}]} | 2,772 | 261 |
gh_patches_debug_54113 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-contrib-348 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix readthedocs build
As discussed in 01/28/2021 SIG.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # http://www.sphinx-doc.org/en/master/config
6
7 # -- Path setup --------------------------------------------------------------
8
9 # If extensions (or modules to document with autodoc) are in another directory,
10 # add these directories to sys.path here. If the directory is relative to the
11 # documentation root, use os.path.abspath to make it absolute, like shown here.
12
13 import os
14 import sys
15 from configparser import ConfigParser
16 from os import listdir
17 from os.path import isdir, join
18
19 # configure django to avoid the following exception:
20 # django.core.exceptions.ImproperlyConfigured: Requested settings, but settings
21 # are not configured. You must either define the environment variable
22 # DJANGO_SETTINGS_MODULE or call settings.configure() before accessing settings.
23 from django.conf import settings
24
25 settings.configure()
26
27 exp = "../exporter"
28 exp_dirs = [
29 os.path.abspath("/".join(["../exporter", f, "src"]))
30 for f in listdir(exp)
31 if isdir(join(exp, f))
32 ]
33
34 instr = "../instrumentation"
35 instr_dirs = [
36 os.path.abspath("/".join(["../instrumentation", f, "src"]))
37 for f in listdir(instr)
38 if isdir(join(instr, f))
39 ]
40
41 sdk_ext = "../sdk-extension"
42 sdk_ext_dirs = [
43 os.path.abspath("/".join(["../sdk-extension", f, "src"]))
44 for f in listdir(sdk_ext)
45 if isdir(join(sdk_ext, f))
46 ]
47
48 sys.path[:0] = exp_dirs + instr_dirs + sdk_ext_dirs
49
50 # -- Project information -----------------------------------------------------
51
52 project = "OpenTelemetry Python Contrib"
53 copyright = "OpenTelemetry Authors" # pylint: disable=redefined-builtin
54 author = "OpenTelemetry Authors"
55
56
57 # -- General configuration ---------------------------------------------------
58
59 # Easy automatic cross-references for `code in backticks`
60 default_role = "any"
61
62 # Add any Sphinx extension module names here, as strings. They can be
63 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
64 # ones.
65 extensions = [
66 # API doc generation
67 "sphinx.ext.autodoc",
68 # Support for google-style docstrings
69 "sphinx.ext.napoleon",
70 # Infer types from hints instead of docstrings
71 "sphinx_autodoc_typehints",
72 # Add links to source from generated docs
73 "sphinx.ext.viewcode",
74 # Link to other sphinx docs
75 "sphinx.ext.intersphinx",
76 # Add a .nojekyll file to the generated HTML docs
77 # https://help.github.com/en/articles/files-that-start-with-an-underscore-are-missing
78 "sphinx.ext.githubpages",
79 # Support external links to different versions in the Github repo
80 "sphinx.ext.extlinks",
81 ]
82
83 intersphinx_mapping = {
84 "python": ("https://docs.python.org/3/", None),
85 "opentracing": (
86 "https://opentracing-python.readthedocs.io/en/latest/",
87 None,
88 ),
89 "aiohttp": ("https://aiohttp.readthedocs.io/en/stable/", None),
90 "wrapt": ("https://wrapt.readthedocs.io/en/latest/", None),
91 "pymongo": ("https://pymongo.readthedocs.io/en/stable/", None),
92 "opentelemetry": (
93 "https://opentelemetry-python.readthedocs.io/en/latest/",
94 None,
95 ),
96 }
97
98 # http://www.sphinx-doc.org/en/master/config.html#confval-nitpicky
99 # Sphinx will warn about all references where the target cannot be found.
100 nitpicky = True
101 # Sphinx does not recognize generic type TypeVars
102 # Container supposedly were fixed, but does not work
103 # https://github.com/sphinx-doc/sphinx/pull/3744
104 nitpick_ignore = []
105
106 cfg = ConfigParser()
107 cfg.read("./nitpick-exceptions.ini")
108 mcfg = cfg["default"]
109
110
111 def getlistcfg(strval):
112 return [
113 val.strip()
114 for line in strval.split("\n")
115 for val in line.split(",")
116 if val.strip()
117 ]
118
119
120 if "class_references" in mcfg:
121 class_references = getlistcfg(mcfg["class_references"])
122 for class_reference in class_references:
123 nitpick_ignore.append(("py:class", class_reference,))
124
125 if "anys" in mcfg:
126 anys = getlistcfg(mcfg["anys"])
127 for any in anys:
128 nitpick_ignore.append(("any", any,))
129
130 # Add any paths that contain templates here, relative to this directory.
131 templates_path = ["_templates"]
132
133 # List of patterns, relative to source directory, that match files and
134 # directories to ignore when looking for source files.
135 # This pattern also affects html_static_path and html_extra_path.
136 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
137
138 autodoc_default_options = {
139 "members": True,
140 "undoc-members": True,
141 "show-inheritance": True,
142 "member-order": "bysource",
143 }
144
145 # -- Options for HTML output -------------------------------------------------
146
147 # The theme to use for HTML and HTML Help pages. See the documentation for
148 # a list of builtin themes.
149 #
150 html_theme = "sphinx_rtd_theme"
151
152 # Add any paths that contain custom static files (such as style sheets) here,
153 # relative to this directory. They are copied after the builtin static files,
154 # so a file named "default.css" will overwrite the builtin "default.css".
155 html_static_path = []
156
157 # Support external links to specific versions of the files in the Github repo
158 branch = os.environ.get("READTHEDOCS_VERSION")
159 if branch is None or branch == "latest":
160 branch = "main"
161
162 REPO = "open-telemetry/opentelemetry-python-contrib/"
163 scm_raw_web = "https://raw.githubusercontent.com/" + REPO + branch
164 scm_web = "https://github.com/" + REPO + "blob/" + branch
165
166 # Store variables in the epilogue so they are globally available.
167 rst_epilog = """
168 .. |SCM_WEB| replace:: {s}
169 .. |SCM_RAW_WEB| replace:: {sr}
170 .. |SCM_BRANCH| replace:: {b}
171 """.format(
172 s=scm_web, sr=scm_raw_web, b=branch
173 )
174
175 # used to have links to repo files
176 extlinks = {
177 "scm_raw_web": (scm_raw_web + "/%s", "scm_raw_web"),
178 "scm_web": (scm_web + "/%s", "scm_web"),
179 }
180
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -124,8 +124,8 @@
if "anys" in mcfg:
anys = getlistcfg(mcfg["anys"])
- for any in anys:
- nitpick_ignore.append(("any", any,))
+ for _any in anys:
+ nitpick_ignore.append(("any", _any,))
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -124,8 +124,8 @@\n \n if \"anys\" in mcfg:\n anys = getlistcfg(mcfg[\"anys\"])\n- for any in anys:\n- nitpick_ignore.append((\"any\", any,))\n+ for _any in anys:\n+ nitpick_ignore.append((\"any\", _any,))\n \n # Add any paths that contain templates here, relative to this directory.\n templates_path = [\"_templates\"]\n", "issue": "Fix readthedocs build\nAs discussed in 01/28/2021 SIG.\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport sys\nfrom configparser import ConfigParser\nfrom os import listdir\nfrom os.path import isdir, join\n\n# configure django to avoid the following exception:\n# django.core.exceptions.ImproperlyConfigured: Requested settings, but settings\n# are not configured. You must either define the environment variable\n# DJANGO_SETTINGS_MODULE or call settings.configure() before accessing settings.\nfrom django.conf import settings\n\nsettings.configure()\n\nexp = \"../exporter\"\nexp_dirs = [\n os.path.abspath(\"/\".join([\"../exporter\", f, \"src\"]))\n for f in listdir(exp)\n if isdir(join(exp, f))\n]\n\ninstr = \"../instrumentation\"\ninstr_dirs = [\n os.path.abspath(\"/\".join([\"../instrumentation\", f, \"src\"]))\n for f in listdir(instr)\n if isdir(join(instr, f))\n]\n\nsdk_ext = \"../sdk-extension\"\nsdk_ext_dirs = [\n os.path.abspath(\"/\".join([\"../sdk-extension\", f, \"src\"]))\n for f in listdir(sdk_ext)\n if isdir(join(sdk_ext, f))\n]\n\nsys.path[:0] = exp_dirs + instr_dirs + sdk_ext_dirs\n\n# -- Project information -----------------------------------------------------\n\nproject = \"OpenTelemetry Python Contrib\"\ncopyright = \"OpenTelemetry Authors\" # pylint: disable=redefined-builtin\nauthor = \"OpenTelemetry Authors\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Easy automatic cross-references for `code in backticks`\ndefault_role = \"any\"\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n # API doc generation\n \"sphinx.ext.autodoc\",\n # Support for google-style docstrings\n \"sphinx.ext.napoleon\",\n # Infer types from hints instead of docstrings\n \"sphinx_autodoc_typehints\",\n # Add links to source from generated docs\n \"sphinx.ext.viewcode\",\n # Link to other sphinx docs\n \"sphinx.ext.intersphinx\",\n # Add a .nojekyll file to the generated HTML docs\n # https://help.github.com/en/articles/files-that-start-with-an-underscore-are-missing\n \"sphinx.ext.githubpages\",\n # Support external links to different versions in the Github repo\n \"sphinx.ext.extlinks\",\n]\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"opentracing\": (\n \"https://opentracing-python.readthedocs.io/en/latest/\",\n None,\n ),\n \"aiohttp\": (\"https://aiohttp.readthedocs.io/en/stable/\", None),\n \"wrapt\": (\"https://wrapt.readthedocs.io/en/latest/\", None),\n \"pymongo\": (\"https://pymongo.readthedocs.io/en/stable/\", None),\n \"opentelemetry\": (\n \"https://opentelemetry-python.readthedocs.io/en/latest/\",\n None,\n ),\n}\n\n# http://www.sphinx-doc.org/en/master/config.html#confval-nitpicky\n# Sphinx will warn about all references where the target cannot be found.\nnitpicky = True\n# Sphinx does not recognize generic type TypeVars\n# Container supposedly were fixed, but does not work\n# https://github.com/sphinx-doc/sphinx/pull/3744\nnitpick_ignore = []\n\ncfg = ConfigParser()\ncfg.read(\"./nitpick-exceptions.ini\")\nmcfg = cfg[\"default\"]\n\n\ndef getlistcfg(strval):\n return [\n val.strip()\n for line in strval.split(\"\\n\")\n for val in line.split(\",\")\n if val.strip()\n ]\n\n\nif \"class_references\" in mcfg:\n class_references = getlistcfg(mcfg[\"class_references\"])\n for class_reference in class_references:\n nitpick_ignore.append((\"py:class\", class_reference,))\n\nif \"anys\" in mcfg:\n anys = getlistcfg(mcfg[\"anys\"])\n for any in anys:\n nitpick_ignore.append((\"any\", any,))\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\nautodoc_default_options = {\n \"members\": True,\n \"undoc-members\": True,\n \"show-inheritance\": True,\n \"member-order\": \"bysource\",\n}\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = []\n\n# Support external links to specific versions of the files in the Github repo\nbranch = os.environ.get(\"READTHEDOCS_VERSION\")\nif branch is None or branch == \"latest\":\n branch = \"main\"\n\nREPO = \"open-telemetry/opentelemetry-python-contrib/\"\nscm_raw_web = \"https://raw.githubusercontent.com/\" + REPO + branch\nscm_web = \"https://github.com/\" + REPO + \"blob/\" + branch\n\n# Store variables in the epilogue so they are globally available.\nrst_epilog = \"\"\"\n.. |SCM_WEB| replace:: {s}\n.. |SCM_RAW_WEB| replace:: {sr}\n.. |SCM_BRANCH| replace:: {b}\n\"\"\".format(\n s=scm_web, sr=scm_raw_web, b=branch\n)\n\n# used to have links to repo files\nextlinks = {\n \"scm_raw_web\": (scm_raw_web + \"/%s\", \"scm_raw_web\"),\n \"scm_web\": (scm_web + \"/%s\", \"scm_web\"),\n}\n", "path": "docs/conf.py"}], "after_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport sys\nfrom configparser import ConfigParser\nfrom os import listdir\nfrom os.path import isdir, join\n\n# configure django to avoid the following exception:\n# django.core.exceptions.ImproperlyConfigured: Requested settings, but settings\n# are not configured. You must either define the environment variable\n# DJANGO_SETTINGS_MODULE or call settings.configure() before accessing settings.\nfrom django.conf import settings\n\nsettings.configure()\n\nexp = \"../exporter\"\nexp_dirs = [\n os.path.abspath(\"/\".join([\"../exporter\", f, \"src\"]))\n for f in listdir(exp)\n if isdir(join(exp, f))\n]\n\ninstr = \"../instrumentation\"\ninstr_dirs = [\n os.path.abspath(\"/\".join([\"../instrumentation\", f, \"src\"]))\n for f in listdir(instr)\n if isdir(join(instr, f))\n]\n\nsdk_ext = \"../sdk-extension\"\nsdk_ext_dirs = [\n os.path.abspath(\"/\".join([\"../sdk-extension\", f, \"src\"]))\n for f in listdir(sdk_ext)\n if isdir(join(sdk_ext, f))\n]\n\nsys.path[:0] = exp_dirs + instr_dirs + sdk_ext_dirs\n\n# -- Project information -----------------------------------------------------\n\nproject = \"OpenTelemetry Python Contrib\"\ncopyright = \"OpenTelemetry Authors\" # pylint: disable=redefined-builtin\nauthor = \"OpenTelemetry Authors\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Easy automatic cross-references for `code in backticks`\ndefault_role = \"any\"\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n # API doc generation\n \"sphinx.ext.autodoc\",\n # Support for google-style docstrings\n \"sphinx.ext.napoleon\",\n # Infer types from hints instead of docstrings\n \"sphinx_autodoc_typehints\",\n # Add links to source from generated docs\n \"sphinx.ext.viewcode\",\n # Link to other sphinx docs\n \"sphinx.ext.intersphinx\",\n # Add a .nojekyll file to the generated HTML docs\n # https://help.github.com/en/articles/files-that-start-with-an-underscore-are-missing\n \"sphinx.ext.githubpages\",\n # Support external links to different versions in the Github repo\n \"sphinx.ext.extlinks\",\n]\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"opentracing\": (\n \"https://opentracing-python.readthedocs.io/en/latest/\",\n None,\n ),\n \"aiohttp\": (\"https://aiohttp.readthedocs.io/en/stable/\", None),\n \"wrapt\": (\"https://wrapt.readthedocs.io/en/latest/\", None),\n \"pymongo\": (\"https://pymongo.readthedocs.io/en/stable/\", None),\n \"opentelemetry\": (\n \"https://opentelemetry-python.readthedocs.io/en/latest/\",\n None,\n ),\n}\n\n# http://www.sphinx-doc.org/en/master/config.html#confval-nitpicky\n# Sphinx will warn about all references where the target cannot be found.\nnitpicky = True\n# Sphinx does not recognize generic type TypeVars\n# Container supposedly were fixed, but does not work\n# https://github.com/sphinx-doc/sphinx/pull/3744\nnitpick_ignore = []\n\ncfg = ConfigParser()\ncfg.read(\"./nitpick-exceptions.ini\")\nmcfg = cfg[\"default\"]\n\n\ndef getlistcfg(strval):\n return [\n val.strip()\n for line in strval.split(\"\\n\")\n for val in line.split(\",\")\n if val.strip()\n ]\n\n\nif \"class_references\" in mcfg:\n class_references = getlistcfg(mcfg[\"class_references\"])\n for class_reference in class_references:\n nitpick_ignore.append((\"py:class\", class_reference,))\n\nif \"anys\" in mcfg:\n anys = getlistcfg(mcfg[\"anys\"])\n for _any in anys:\n nitpick_ignore.append((\"any\", _any,))\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\nautodoc_default_options = {\n \"members\": True,\n \"undoc-members\": True,\n \"show-inheritance\": True,\n \"member-order\": \"bysource\",\n}\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = []\n\n# Support external links to specific versions of the files in the Github repo\nbranch = os.environ.get(\"READTHEDOCS_VERSION\")\nif branch is None or branch == \"latest\":\n branch = \"main\"\n\nREPO = \"open-telemetry/opentelemetry-python-contrib/\"\nscm_raw_web = \"https://raw.githubusercontent.com/\" + REPO + branch\nscm_web = \"https://github.com/\" + REPO + \"blob/\" + branch\n\n# Store variables in the epilogue so they are globally available.\nrst_epilog = \"\"\"\n.. |SCM_WEB| replace:: {s}\n.. |SCM_RAW_WEB| replace:: {sr}\n.. |SCM_BRANCH| replace:: {b}\n\"\"\".format(\n s=scm_web, sr=scm_raw_web, b=branch\n)\n\n# used to have links to repo files\nextlinks = {\n \"scm_raw_web\": (scm_raw_web + \"/%s\", \"scm_raw_web\"),\n \"scm_web\": (scm_web + \"/%s\", \"scm_web\"),\n}\n", "path": "docs/conf.py"}]} | 2,151 | 123 |
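The golden diff for this entry only renames a loop variable from `any` to `_any`. For context, and as an assumption on my part rather than anything stated in the entry, `for any in anys:` in `conf.py` rebinds the module-level name `any` to a string, shadowing the Python builtin for any later code in that module; the sketch below reproduces that effect and shows why the rename keeps behaviour identical while removing the hazard.

```python
# Hypothetical, self-contained reproduction of the builtin-shadowing hazard;
# the name `anys` mirrors conf.py, everything else is illustrative only.
anys = ["py:class", "py:obj"]

for any in anys:      # rebinds `any` to the last string in the list
    pass

try:
    any(x == "py:obj" for x in anys)   # later use of the builtin now fails
except TypeError as exc:
    print(exc)                         # 'str' object is not callable

for _any in anys:     # the patched spelling leaves the builtin untouched
    pass
print(any(x == "py:obj" for x in anys))  # True
```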
gh_patches_debug_2193 | rasdani/github-patches | git_diff | ansible-collections__community.general-6695 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
read_csv - Key 'Name' was not found in the CSV header fields
##### SUMMARY
The `read_csv` module fails to identify a field, yet displaces the field in the list of available fields.
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
read_csv
##### ANSIBLE VERSION
```
ansible 2.9.10
config file = /home/anton/git/ansible-deploy-vmware-vm/ansible.cfg
configured module search path = ['/home/anton/git/ansible-deploy-vmware-vm/library']
ansible python module location = /home/anton/.local/lib/python3.6/site-packages/ansible
executable location = /home/anton/.local/bin/ansible
python version = 3.6.9 (default, Apr 18 2020, 01:56:04) [GCC 8.4.0]
```
##### CONFIGURATION
```
# config file for ansible -- http://ansible.com/
# ==============================================
# nearly all parameters can be overridden in ansible-playbook
# or with command line flags. ansible will read ANSIBLE_CONFIG,
# ansible.cfg in the current working directory, .ansible.cfg in
# the home directory or /etc/ansible/ansible.cfg, whichever it
# finds first
[defaults]
host_key_checking = False
host_key_check = False
ansible_python_interpreter=/usr/bin/python3
log_path = ./ansible.log
#bin_ansible_callbacks=True
#stdout_callback = debug
# some basic default values...
library = ./library
# additional paths to search for roles in, colon separated
roles_path = ./roles
[ssh_connection]
# ssh arguments to use
ssh_args = -o StrictHostKeyChecking=no
timeout=60
```
##### OS / ENVIRONMENT
Ubuntu 20:04
##### STEPS TO REPRODUCE
<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->
<!--- Paste example playbooks or commands between quotes below -->
```
---
- name: Right-size VMs
gather_facts: false
hosts: all
connection: local
tasks:
# Read a CSV file and access the first item
- name: Read users from CSV file and return a list
read_csv:
path: "files/vms/6-19-20 Optimization Report - Oversized Virtual Machines Prod2.csv"
key: Name
register: users
- debug:
msg: 'User {{ users.list.2.Name}}'
# msg: 'User {{ users.list.2.Name}} has UID {{ users.list.2.ReclaimablevCPUs}} and GID {{ users.list.2.ReclaimableMemory}}'
# msg: "{{ users }}"
```
<!--- HINT: You can paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
Expect to be able to read CSV values by col name (field) as based on module documentation.
##### ACTUAL RESULTS
```
fatal: [localhost]: FAILED! => {"ansible_facts": {"discovered_interpreter_python": "/usr/bin/python"}, "changed": false, "msg": "Key 'Name' was not found in the CSV header fields: Name, Configured-vCPU, ReclaimablevCPUs, ConfiguredMemory, ReclaimableMemory, ParentvCenter"}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/module_utils/csv.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright (c) 2021, Andrew Pantuso (@ajpantuso) <[email protected]>
4 # Copyright (c) 2018, Dag Wieers (@dagwieers) <[email protected]>
5 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
6 # SPDX-License-Identifier: GPL-3.0-or-later
7
8 from __future__ import absolute_import, division, print_function
9 __metaclass__ = type
10
11 import csv
12 from io import BytesIO, StringIO
13
14 from ansible.module_utils.common.text.converters import to_native
15 from ansible.module_utils.six import PY3
16
17
18 class CustomDialectFailureError(Exception):
19 pass
20
21
22 class DialectNotAvailableError(Exception):
23 pass
24
25
26 CSVError = csv.Error
27
28
29 def initialize_dialect(dialect, **kwargs):
30 # Add Unix dialect from Python 3
31 class unix_dialect(csv.Dialect):
32 """Describe the usual properties of Unix-generated CSV files."""
33 delimiter = ','
34 quotechar = '"'
35 doublequote = True
36 skipinitialspace = False
37 lineterminator = '\n'
38 quoting = csv.QUOTE_ALL
39
40 csv.register_dialect("unix", unix_dialect)
41
42 if dialect not in csv.list_dialects():
43 raise DialectNotAvailableError("Dialect '%s' is not supported by your version of python." % dialect)
44
45 # Create a dictionary from only set options
46 dialect_params = dict((k, v) for k, v in kwargs.items() if v is not None)
47 if dialect_params:
48 try:
49 csv.register_dialect('custom', dialect, **dialect_params)
50 except TypeError as e:
51 raise CustomDialectFailureError("Unable to create custom dialect: %s" % to_native(e))
52 dialect = 'custom'
53
54 return dialect
55
56
57 def read_csv(data, dialect, fieldnames=None):
58
59 data = to_native(data, errors='surrogate_or_strict')
60
61 if PY3:
62 fake_fh = StringIO(data)
63 else:
64 fake_fh = BytesIO(data)
65
66 reader = csv.DictReader(fake_fh, fieldnames=fieldnames, dialect=dialect)
67
68 return reader
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/module_utils/csv.py b/plugins/module_utils/csv.py
--- a/plugins/module_utils/csv.py
+++ b/plugins/module_utils/csv.py
@@ -55,8 +55,10 @@
def read_csv(data, dialect, fieldnames=None):
-
+ BOM = to_native(u'\ufeff')
data = to_native(data, errors='surrogate_or_strict')
+ if data.startswith(BOM):
+ data = data[len(BOM):]
if PY3:
fake_fh = StringIO(data)
| {"golden_diff": "diff --git a/plugins/module_utils/csv.py b/plugins/module_utils/csv.py\n--- a/plugins/module_utils/csv.py\n+++ b/plugins/module_utils/csv.py\n@@ -55,8 +55,10 @@\n \n \n def read_csv(data, dialect, fieldnames=None):\n-\n+ BOM = to_native(u'\\ufeff')\n data = to_native(data, errors='surrogate_or_strict')\n+ if data.startswith(BOM):\n+ data = data[len(BOM):]\n \n if PY3:\n fake_fh = StringIO(data)\n", "issue": "read_csv - Key 'Name' was not found in the CSV header fields\n##### SUMMARY\r\nThe `read_csv` module fails to identify a field, yet displaces the field in the list of available fields.\r\n\r\n##### ISSUE TYPE\r\n- Bug Report\r\n\r\n##### COMPONENT NAME\r\nread_csv\r\n\r\n##### ANSIBLE VERSION\r\n\r\n```\r\nansible 2.9.10\r\n config file = /home/anton/git/ansible-deploy-vmware-vm/ansible.cfg\r\n configured module search path = ['/home/anton/git/ansible-deploy-vmware-vm/library']\r\n ansible python module location = /home/anton/.local/lib/python3.6/site-packages/ansible\r\n executable location = /home/anton/.local/bin/ansible\r\n python version = 3.6.9 (default, Apr 18 2020, 01:56:04) [GCC 8.4.0]\r\n\r\n```\r\n\r\n##### CONFIGURATION\r\n\r\n```\r\n# config file for ansible -- http://ansible.com/\r\n# ==============================================\r\n\r\n# nearly all parameters can be overridden in ansible-playbook\r\n# or with command line flags. ansible will read ANSIBLE_CONFIG,\r\n# ansible.cfg in the current working directory, .ansible.cfg in\r\n# the home directory or /etc/ansible/ansible.cfg, whichever it\r\n# finds first\r\n\r\n[defaults]\r\nhost_key_checking = False\r\nhost_key_check = False\r\nansible_python_interpreter=/usr/bin/python3\r\nlog_path = ./ansible.log\r\n#bin_ansible_callbacks=True\r\n#stdout_callback = debug\r\n\r\n\r\n# some basic default values...\r\nlibrary = ./library\r\n\r\n# additional paths to search for roles in, colon separated\r\nroles_path = ./roles\r\n\r\n[ssh_connection]\r\n# ssh arguments to use\r\nssh_args = -o StrictHostKeyChecking=no\r\ntimeout=60\r\n\r\n\r\n```\r\n\r\n##### OS / ENVIRONMENT\r\nUbuntu 20:04\r\n\r\n\r\n##### STEPS TO REPRODUCE\r\n<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```\r\n---\r\n- name: Right-size VMs\r\n gather_facts: false\r\n hosts: all\r\n connection: local\r\n tasks:\r\n # Read a CSV file and access the first item\r\n - name: Read users from CSV file and return a list\r\n read_csv:\r\n path: \"files/vms/6-19-20 Optimization Report - Oversized Virtual Machines Prod2.csv\"\r\n key: Name\r\n register: users\r\n\r\n - debug:\r\n msg: 'User {{ users.list.2.Name}}'\r\n # msg: 'User {{ users.list.2.Name}} has UID {{ users.list.2.ReclaimablevCPUs}} and GID {{ users.list.2.ReclaimableMemory}}'\r\n # msg: \"{{ users }}\"\r\n\r\n\r\n\r\n```\r\n\r\n<!--- HINT: You can paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\nExpect to be able to read CSV values by col name (field) as based on module documentation.\r\n\r\n\r\n##### ACTUAL RESULTS\r\n```\r\nfatal: [localhost]: FAILED! 
=> {\"ansible_facts\": {\"discovered_interpreter_python\": \"/usr/bin/python\"}, \"changed\": false, \"msg\": \"Key 'Name' was not found in the CSV header fields: \ufeffName, Configured-vCPU, ReclaimablevCPUs, ConfiguredMemory, ReclaimableMemory, ParentvCenter\"}\r\n```\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2021, Andrew Pantuso (@ajpantuso) <[email protected]>\n# Copyright (c) 2018, Dag Wieers (@dagwieers) <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nimport csv\nfrom io import BytesIO, StringIO\n\nfrom ansible.module_utils.common.text.converters import to_native\nfrom ansible.module_utils.six import PY3\n\n\nclass CustomDialectFailureError(Exception):\n pass\n\n\nclass DialectNotAvailableError(Exception):\n pass\n\n\nCSVError = csv.Error\n\n\ndef initialize_dialect(dialect, **kwargs):\n # Add Unix dialect from Python 3\n class unix_dialect(csv.Dialect):\n \"\"\"Describe the usual properties of Unix-generated CSV files.\"\"\"\n delimiter = ','\n quotechar = '\"'\n doublequote = True\n skipinitialspace = False\n lineterminator = '\\n'\n quoting = csv.QUOTE_ALL\n\n csv.register_dialect(\"unix\", unix_dialect)\n\n if dialect not in csv.list_dialects():\n raise DialectNotAvailableError(\"Dialect '%s' is not supported by your version of python.\" % dialect)\n\n # Create a dictionary from only set options\n dialect_params = dict((k, v) for k, v in kwargs.items() if v is not None)\n if dialect_params:\n try:\n csv.register_dialect('custom', dialect, **dialect_params)\n except TypeError as e:\n raise CustomDialectFailureError(\"Unable to create custom dialect: %s\" % to_native(e))\n dialect = 'custom'\n\n return dialect\n\n\ndef read_csv(data, dialect, fieldnames=None):\n\n data = to_native(data, errors='surrogate_or_strict')\n\n if PY3:\n fake_fh = StringIO(data)\n else:\n fake_fh = BytesIO(data)\n\n reader = csv.DictReader(fake_fh, fieldnames=fieldnames, dialect=dialect)\n\n return reader\n", "path": "plugins/module_utils/csv.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2021, Andrew Pantuso (@ajpantuso) <[email protected]>\n# Copyright (c) 2018, Dag Wieers (@dagwieers) <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nimport csv\nfrom io import BytesIO, StringIO\n\nfrom ansible.module_utils.common.text.converters import to_native\nfrom ansible.module_utils.six import PY3\n\n\nclass CustomDialectFailureError(Exception):\n pass\n\n\nclass DialectNotAvailableError(Exception):\n pass\n\n\nCSVError = csv.Error\n\n\ndef initialize_dialect(dialect, **kwargs):\n # Add Unix dialect from Python 3\n class unix_dialect(csv.Dialect):\n \"\"\"Describe the usual properties of Unix-generated CSV files.\"\"\"\n delimiter = ','\n quotechar = '\"'\n doublequote = True\n skipinitialspace = False\n lineterminator = '\\n'\n quoting = csv.QUOTE_ALL\n\n csv.register_dialect(\"unix\", unix_dialect)\n\n if dialect not in csv.list_dialects():\n raise DialectNotAvailableError(\"Dialect '%s' is not supported by your version of python.\" % dialect)\n\n # Create a dictionary from only set options\n 
dialect_params = dict((k, v) for k, v in kwargs.items() if v is not None)\n if dialect_params:\n try:\n csv.register_dialect('custom', dialect, **dialect_params)\n except TypeError as e:\n raise CustomDialectFailureError(\"Unable to create custom dialect: %s\" % to_native(e))\n dialect = 'custom'\n\n return dialect\n\n\ndef read_csv(data, dialect, fieldnames=None):\n BOM = to_native(u'\\ufeff')\n data = to_native(data, errors='surrogate_or_strict')\n if data.startswith(BOM):\n data = data[len(BOM):]\n\n if PY3:\n fake_fh = StringIO(data)\n else:\n fake_fh = BytesIO(data)\n\n reader = csv.DictReader(fake_fh, fieldnames=fieldnames, dialect=dialect)\n\n return reader\n", "path": "plugins/module_utils/csv.py"}]} | 1,631 | 116 |
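A short, hypothetical reproduction of the symptom in this entry, not taken from the dataset itself: a CSV saved as UTF-8 with a byte-order mark makes `csv.DictReader` report the first field as `'\ufeffName'`, which prints as a plain `Name` in the error message even though a lookup of the key `Name` fails; stripping the BOM up front, as the patched `read_csv()` does, restores the lookup.

```python
# Minimal sketch of the BOM failure mode; the file contents are invented.
import csv
from io import StringIO

data = "\ufeffName,ConfiguredMemory\nvm01,8192\n"   # UTF-8 BOM kept as text

reader = csv.DictReader(StringIO(data))
print(reader.fieldnames)                 # ['\ufeffName', 'ConfiguredMemory']
print("Name" in reader.fieldnames)       # False -> "Key 'Name' was not found"

BOM = "\ufeff"
clean = data[len(BOM):] if data.startswith(BOM) else data
print("Name" in csv.DictReader(StringIO(clean)).fieldnames)   # True
```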
gh_patches_debug_11630 | rasdani/github-patches | git_diff | mozilla__bugbug-407 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Not all training tasks need commits DB
Indeed I think none of the ones we currently run as part of the data pipeline need the commits.
We should:
- Make the trainer script only download the DBs which are necessary;
- Remove the dependency on the commit retrieval task in the data-pipeline.yml.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/trainer.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import argparse
4 import lzma
5 import os
6 import shutil
7 from logging import INFO, basicConfig, getLogger
8 from urllib.request import urlretrieve
9
10 from bugbug.models.component import ComponentModel
11 from bugbug.models.defect_enhancement_task import DefectEnhancementTaskModel
12 from bugbug.models.regression import RegressionModel
13 from bugbug.models.tracking import TrackingModel
14
15 basicConfig(level=INFO)
16 logger = getLogger(__name__)
17
18 BASE_URL = "https://index.taskcluster.net/v1/task/project.relman.bugbug.data_{}.latest/artifacts/public"
19
20
21 class Trainer(object):
22 def decompress_file(self, path):
23 with lzma.open(f"{path}.xz", "rb") as input_f:
24 with open(path, "wb") as output_f:
25 shutil.copyfileobj(input_f, output_f)
26
27 def compress_file(self, path):
28 with open(path, "rb") as input_f:
29 with lzma.open(f"{path}.xz", "wb") as output_f:
30 shutil.copyfileobj(input_f, output_f)
31
32 def train_defect_enhancement_task(self):
33 logger.info("Training *defect vs enhancement vs task* model")
34 model = DefectEnhancementTaskModel()
35 model.train()
36 self.compress_file("defectenhancementtaskmodel")
37
38 def train_component(self):
39 logger.info("Training *component* model")
40 model = ComponentModel()
41 model.train()
42 self.compress_file("componentmodel")
43
44 def train_regression(self):
45 logger.info("Training *regression vs non-regression* model")
46 model = RegressionModel()
47 model.train()
48 self.compress_file("regressionmodel")
49
50 def train_tracking(self):
51 logger.info("Training *tracking* model")
52 model = TrackingModel()
53 model.train()
54 self.compress_file("trackingmodel")
55
56 def go(self, model):
57 # TODO: Stop hard-coding them
58 valid_models = ["defect", "component", "regression", "tracking"]
59
60 if model not in valid_models:
61 exception = (
62 f"Invalid model {model!r} name, use one of {valid_models!r} instead"
63 )
64 raise ValueError(exception)
65
66 # Download datasets that were built by bugbug_data.
67 os.makedirs("data", exist_ok=True)
68
69 # Bugs.json
70 logger.info("Downloading bugs database")
71 bugs_url = BASE_URL.format("bugs")
72 urlretrieve(f"{bugs_url}/bugs.json.xz", "data/bugs.json.xz")
73 logger.info("Decompressing bugs database")
74 self.decompress_file("data/bugs.json")
75
76 # Commits.json
77 logger.info("Downloading commits database")
78 commits_url = BASE_URL.format("commits")
79 urlretrieve(f"{commits_url}/commits.json.xz", "data/commits.json.xz")
80 logger.info("Decompressing commits database")
81 self.decompress_file("data/commits.json")
82
83 if model == "defect":
84 # Train classifier for defect-vs-enhancement-vs-task.
85 self.train_defect_enhancement_task()
86 elif model == "component":
87 # Train classifier for the component of a bug.
88 self.train_component()
89 elif model == "regression":
90 # Train classifier for regression-vs-nonregression.
91 self.train_regression()
92 elif model == "tracking":
93 # Train classifier for tracking bugs.
94 self.train_tracking()
95 else:
96 # We shouldn't be here
97 raise Exception("valid_models is likely not up-to-date anymore")
98
99
100 def main():
101 description = "Train the models"
102 parser = argparse.ArgumentParser(description=description)
103
104 parser.add_argument("model", help="Which model to train.")
105
106 args = parser.parse_args()
107
108 retriever = Trainer()
109 retriever.go(args.model)
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/trainer.py b/scripts/trainer.py
--- a/scripts/trainer.py
+++ b/scripts/trainer.py
@@ -73,13 +73,6 @@
logger.info("Decompressing bugs database")
self.decompress_file("data/bugs.json")
- # Commits.json
- logger.info("Downloading commits database")
- commits_url = BASE_URL.format("commits")
- urlretrieve(f"{commits_url}/commits.json.xz", "data/commits.json.xz")
- logger.info("Decompressing commits database")
- self.decompress_file("data/commits.json")
-
if model == "defect":
# Train classifier for defect-vs-enhancement-vs-task.
self.train_defect_enhancement_task()
| {"golden_diff": "diff --git a/scripts/trainer.py b/scripts/trainer.py\n--- a/scripts/trainer.py\n+++ b/scripts/trainer.py\n@@ -73,13 +73,6 @@\n logger.info(\"Decompressing bugs database\")\n self.decompress_file(\"data/bugs.json\")\n \n- # Commits.json\n- logger.info(\"Downloading commits database\")\n- commits_url = BASE_URL.format(\"commits\")\n- urlretrieve(f\"{commits_url}/commits.json.xz\", \"data/commits.json.xz\")\n- logger.info(\"Decompressing commits database\")\n- self.decompress_file(\"data/commits.json\")\n-\n if model == \"defect\":\n # Train classifier for defect-vs-enhancement-vs-task.\n self.train_defect_enhancement_task()\n", "issue": "Not all training tasks need commits DB\nIndeed I think none of the ones we currently run as part of the data pipeline need the commits.\r\nWe should:\r\n- Make the trainer script only download the DBs which are necessary;\r\n- Remove the dependency on the commit retrieval task in the data-pipeline.yml.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport argparse\nimport lzma\nimport os\nimport shutil\nfrom logging import INFO, basicConfig, getLogger\nfrom urllib.request import urlretrieve\n\nfrom bugbug.models.component import ComponentModel\nfrom bugbug.models.defect_enhancement_task import DefectEnhancementTaskModel\nfrom bugbug.models.regression import RegressionModel\nfrom bugbug.models.tracking import TrackingModel\n\nbasicConfig(level=INFO)\nlogger = getLogger(__name__)\n\nBASE_URL = \"https://index.taskcluster.net/v1/task/project.relman.bugbug.data_{}.latest/artifacts/public\"\n\n\nclass Trainer(object):\n def decompress_file(self, path):\n with lzma.open(f\"{path}.xz\", \"rb\") as input_f:\n with open(path, \"wb\") as output_f:\n shutil.copyfileobj(input_f, output_f)\n\n def compress_file(self, path):\n with open(path, \"rb\") as input_f:\n with lzma.open(f\"{path}.xz\", \"wb\") as output_f:\n shutil.copyfileobj(input_f, output_f)\n\n def train_defect_enhancement_task(self):\n logger.info(\"Training *defect vs enhancement vs task* model\")\n model = DefectEnhancementTaskModel()\n model.train()\n self.compress_file(\"defectenhancementtaskmodel\")\n\n def train_component(self):\n logger.info(\"Training *component* model\")\n model = ComponentModel()\n model.train()\n self.compress_file(\"componentmodel\")\n\n def train_regression(self):\n logger.info(\"Training *regression vs non-regression* model\")\n model = RegressionModel()\n model.train()\n self.compress_file(\"regressionmodel\")\n\n def train_tracking(self):\n logger.info(\"Training *tracking* model\")\n model = TrackingModel()\n model.train()\n self.compress_file(\"trackingmodel\")\n\n def go(self, model):\n # TODO: Stop hard-coding them\n valid_models = [\"defect\", \"component\", \"regression\", \"tracking\"]\n\n if model not in valid_models:\n exception = (\n f\"Invalid model {model!r} name, use one of {valid_models!r} instead\"\n )\n raise ValueError(exception)\n\n # Download datasets that were built by bugbug_data.\n os.makedirs(\"data\", exist_ok=True)\n\n # Bugs.json\n logger.info(\"Downloading bugs database\")\n bugs_url = BASE_URL.format(\"bugs\")\n urlretrieve(f\"{bugs_url}/bugs.json.xz\", \"data/bugs.json.xz\")\n logger.info(\"Decompressing bugs database\")\n self.decompress_file(\"data/bugs.json\")\n\n # Commits.json\n logger.info(\"Downloading commits database\")\n commits_url = BASE_URL.format(\"commits\")\n urlretrieve(f\"{commits_url}/commits.json.xz\", \"data/commits.json.xz\")\n logger.info(\"Decompressing commits database\")\n 
self.decompress_file(\"data/commits.json\")\n\n if model == \"defect\":\n # Train classifier for defect-vs-enhancement-vs-task.\n self.train_defect_enhancement_task()\n elif model == \"component\":\n # Train classifier for the component of a bug.\n self.train_component()\n elif model == \"regression\":\n # Train classifier for regression-vs-nonregression.\n self.train_regression()\n elif model == \"tracking\":\n # Train classifier for tracking bugs.\n self.train_tracking()\n else:\n # We shouldn't be here\n raise Exception(\"valid_models is likely not up-to-date anymore\")\n\n\ndef main():\n description = \"Train the models\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\"model\", help=\"Which model to train.\")\n\n args = parser.parse_args()\n\n retriever = Trainer()\n retriever.go(args.model)\n", "path": "scripts/trainer.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport argparse\nimport lzma\nimport os\nimport shutil\nfrom logging import INFO, basicConfig, getLogger\nfrom urllib.request import urlretrieve\n\nfrom bugbug.models.component import ComponentModel\nfrom bugbug.models.defect_enhancement_task import DefectEnhancementTaskModel\nfrom bugbug.models.regression import RegressionModel\nfrom bugbug.models.tracking import TrackingModel\n\nbasicConfig(level=INFO)\nlogger = getLogger(__name__)\n\nBASE_URL = \"https://index.taskcluster.net/v1/task/project.relman.bugbug.data_{}.latest/artifacts/public\"\n\n\nclass Trainer(object):\n def decompress_file(self, path):\n with lzma.open(f\"{path}.xz\", \"rb\") as input_f:\n with open(path, \"wb\") as output_f:\n shutil.copyfileobj(input_f, output_f)\n\n def compress_file(self, path):\n with open(path, \"rb\") as input_f:\n with lzma.open(f\"{path}.xz\", \"wb\") as output_f:\n shutil.copyfileobj(input_f, output_f)\n\n def train_defect_enhancement_task(self):\n logger.info(\"Training *defect vs enhancement vs task* model\")\n model = DefectEnhancementTaskModel()\n model.train()\n self.compress_file(\"defectenhancementtaskmodel\")\n\n def train_component(self):\n logger.info(\"Training *component* model\")\n model = ComponentModel()\n model.train()\n self.compress_file(\"componentmodel\")\n\n def train_regression(self):\n logger.info(\"Training *regression vs non-regression* model\")\n model = RegressionModel()\n model.train()\n self.compress_file(\"regressionmodel\")\n\n def train_tracking(self):\n logger.info(\"Training *tracking* model\")\n model = TrackingModel()\n model.train()\n self.compress_file(\"trackingmodel\")\n\n def go(self, model):\n # TODO: Stop hard-coding them\n valid_models = [\"defect\", \"component\", \"regression\", \"tracking\"]\n\n if model not in valid_models:\n exception = (\n f\"Invalid model {model!r} name, use one of {valid_models!r} instead\"\n )\n raise ValueError(exception)\n\n # Download datasets that were built by bugbug_data.\n os.makedirs(\"data\", exist_ok=True)\n\n # Bugs.json\n logger.info(\"Downloading bugs database\")\n bugs_url = BASE_URL.format(\"bugs\")\n urlretrieve(f\"{bugs_url}/bugs.json.xz\", \"data/bugs.json.xz\")\n logger.info(\"Decompressing bugs database\")\n self.decompress_file(\"data/bugs.json\")\n\n if model == \"defect\":\n # Train classifier for defect-vs-enhancement-vs-task.\n self.train_defect_enhancement_task()\n elif model == \"component\":\n # Train classifier for the component of a bug.\n self.train_component()\n elif model == \"regression\":\n # Train classifier for regression-vs-nonregression.\n self.train_regression()\n elif 
model == \"tracking\":\n # Train classifier for tracking bugs.\n self.train_tracking()\n else:\n # We shouldn't be here\n raise Exception(\"valid_models is likely not up-to-date anymore\")\n\n\ndef main():\n description = \"Train the models\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\"model\", help=\"Which model to train.\")\n\n args = parser.parse_args()\n\n retriever = Trainer()\n retriever.go(args.model)\n", "path": "scripts/trainer.py"}]} | 1,363 | 173 |
gh_patches_debug_10182 | rasdani/github-patches | git_diff | getredash__redash-998 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Embed query description appearing larger than vizualization name
The query description is appearing larger then the visualization name:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/handlers/embed.py`
Content:
```
1 import json
2
3 from funcy import project
4 from flask import render_template, request
5 from flask_login import login_required, current_user
6 from flask_restful import abort
7
8 from redash import models, settings
9 from redash import serializers
10 from redash.utils import json_dumps
11 from redash.handlers import routes
12 from redash.handlers.base import org_scoped_rule
13 from redash.permissions import require_access, view_only
14 from authentication import current_org
15
16
17 @routes.route(org_scoped_rule('/embed/query/<query_id>/visualization/<visualization_id>'), methods=['GET'])
18 @login_required
19 def embed(query_id, visualization_id, org_slug=None):
20 # TODO: add event for embed access
21 query = models.Query.get_by_id_and_org(query_id, current_org)
22 require_access(query.groups, current_user, view_only)
23 vis = query.visualizations.where(models.Visualization.id == visualization_id).first()
24 qr = {}
25
26 if vis is not None:
27 vis = vis.to_dict()
28 qr = query.latest_query_data
29 if qr is None:
30 abort(400, message="No Results for this query")
31 else:
32 qr = qr.to_dict()
33 else:
34 abort(404, message="Visualization not found.")
35
36 client_config = {}
37 client_config.update(settings.COMMON_CLIENT_CONFIG)
38
39 qr = project(qr, ('data', 'id', 'retrieved_at'))
40 vis = project(vis, ('description', 'name', 'id', 'options', 'query', 'type', 'updated_at'))
41 vis['query'] = project(vis['query'], ('created_at', 'description', 'name', 'id', 'latest_query_data_id', 'name', 'updated_at'))
42
43 return render_template("embed.html",
44
45 client_config=json_dumps(client_config),
46 visualization=json_dumps(vis),
47 query_result=json_dumps(qr))
48
49
50 @routes.route(org_scoped_rule('/public/dashboards/<token>'), methods=['GET'])
51 @login_required
52 def public_dashboard(token, org_slug=None):
53 # TODO: verify object is a dashboard?
54 if not isinstance(current_user, models.ApiUser):
55 api_key = models.ApiKey.get_by_api_key(token)
56 dashboard = api_key.object
57 else:
58 dashboard = current_user.object
59
60 user = {
61 'permissions': [],
62 'apiKey': current_user.id
63 }
64
65 headers = {
66 'Cache-Control': 'no-cache, no-store, max-age=0, must-revalidate'
67 }
68
69 response = render_template("public.html",
70 headless='embed' in request.args,
71 user=json.dumps(user),
72 seed_data=json_dumps({
73 'dashboard': serializers.public_dashboard(dashboard)
74 }),
75 client_config=json.dumps(settings.COMMON_CLIENT_CONFIG))
76
77 return response, 200, headers
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redash/handlers/embed.py b/redash/handlers/embed.py
--- a/redash/handlers/embed.py
+++ b/redash/handlers/embed.py
@@ -41,7 +41,6 @@
vis['query'] = project(vis['query'], ('created_at', 'description', 'name', 'id', 'latest_query_data_id', 'name', 'updated_at'))
return render_template("embed.html",
-
client_config=json_dumps(client_config),
visualization=json_dumps(vis),
query_result=json_dumps(qr))
| {"golden_diff": "diff --git a/redash/handlers/embed.py b/redash/handlers/embed.py\n--- a/redash/handlers/embed.py\n+++ b/redash/handlers/embed.py\n@@ -41,7 +41,6 @@\n vis['query'] = project(vis['query'], ('created_at', 'description', 'name', 'id', 'latest_query_data_id', 'name', 'updated_at'))\n \n return render_template(\"embed.html\",\n-\n client_config=json_dumps(client_config),\n visualization=json_dumps(vis),\n query_result=json_dumps(qr))\n", "issue": "Embed query description appearing larger than vizualization name\nThe query description is appearing larger then the visualization name:\n\n\n\n", "before_files": [{"content": "import json\n\nfrom funcy import project\nfrom flask import render_template, request\nfrom flask_login import login_required, current_user\nfrom flask_restful import abort\n\nfrom redash import models, settings\nfrom redash import serializers\nfrom redash.utils import json_dumps\nfrom redash.handlers import routes\nfrom redash.handlers.base import org_scoped_rule\nfrom redash.permissions import require_access, view_only\nfrom authentication import current_org\n\n\[email protected](org_scoped_rule('/embed/query/<query_id>/visualization/<visualization_id>'), methods=['GET'])\n@login_required\ndef embed(query_id, visualization_id, org_slug=None):\n # TODO: add event for embed access\n query = models.Query.get_by_id_and_org(query_id, current_org)\n require_access(query.groups, current_user, view_only)\n vis = query.visualizations.where(models.Visualization.id == visualization_id).first()\n qr = {}\n\n if vis is not None:\n vis = vis.to_dict()\n qr = query.latest_query_data\n if qr is None:\n abort(400, message=\"No Results for this query\")\n else:\n qr = qr.to_dict()\n else:\n abort(404, message=\"Visualization not found.\")\n\n client_config = {}\n client_config.update(settings.COMMON_CLIENT_CONFIG)\n\n qr = project(qr, ('data', 'id', 'retrieved_at'))\n vis = project(vis, ('description', 'name', 'id', 'options', 'query', 'type', 'updated_at'))\n vis['query'] = project(vis['query'], ('created_at', 'description', 'name', 'id', 'latest_query_data_id', 'name', 'updated_at'))\n\n return render_template(\"embed.html\",\n\n client_config=json_dumps(client_config),\n visualization=json_dumps(vis),\n query_result=json_dumps(qr))\n\n\[email protected](org_scoped_rule('/public/dashboards/<token>'), methods=['GET'])\n@login_required\ndef public_dashboard(token, org_slug=None):\n # TODO: verify object is a dashboard?\n if not isinstance(current_user, models.ApiUser):\n api_key = models.ApiKey.get_by_api_key(token)\n dashboard = api_key.object\n else:\n dashboard = current_user.object\n\n user = {\n 'permissions': [],\n 'apiKey': current_user.id\n }\n\n headers = {\n 'Cache-Control': 'no-cache, no-store, max-age=0, must-revalidate'\n }\n\n response = render_template(\"public.html\",\n headless='embed' in request.args,\n user=json.dumps(user),\n seed_data=json_dumps({\n 'dashboard': serializers.public_dashboard(dashboard)\n }),\n client_config=json.dumps(settings.COMMON_CLIENT_CONFIG))\n\n return response, 200, headers\n", "path": "redash/handlers/embed.py"}], "after_files": [{"content": "import json\n\nfrom funcy import project\nfrom flask import render_template, request\nfrom flask_login import login_required, current_user\nfrom flask_restful import abort\n\nfrom redash import models, settings\nfrom redash import serializers\nfrom redash.utils import json_dumps\nfrom redash.handlers import routes\nfrom redash.handlers.base import org_scoped_rule\nfrom redash.permissions 
import require_access, view_only\nfrom authentication import current_org\n\n\[email protected](org_scoped_rule('/embed/query/<query_id>/visualization/<visualization_id>'), methods=['GET'])\n@login_required\ndef embed(query_id, visualization_id, org_slug=None):\n # TODO: add event for embed access\n query = models.Query.get_by_id_and_org(query_id, current_org)\n require_access(query.groups, current_user, view_only)\n vis = query.visualizations.where(models.Visualization.id == visualization_id).first()\n qr = {}\n\n if vis is not None:\n vis = vis.to_dict()\n qr = query.latest_query_data\n if qr is None:\n abort(400, message=\"No Results for this query\")\n else:\n qr = qr.to_dict()\n else:\n abort(404, message=\"Visualization not found.\")\n\n client_config = {}\n client_config.update(settings.COMMON_CLIENT_CONFIG)\n\n qr = project(qr, ('data', 'id', 'retrieved_at'))\n vis = project(vis, ('description', 'name', 'id', 'options', 'query', 'type', 'updated_at'))\n vis['query'] = project(vis['query'], ('created_at', 'description', 'name', 'id', 'latest_query_data_id', 'name', 'updated_at'))\n\n return render_template(\"embed.html\",\n client_config=json_dumps(client_config),\n visualization=json_dumps(vis),\n query_result=json_dumps(qr))\n\n\[email protected](org_scoped_rule('/public/dashboards/<token>'), methods=['GET'])\n@login_required\ndef public_dashboard(token, org_slug=None):\n # TODO: verify object is a dashboard?\n if not isinstance(current_user, models.ApiUser):\n api_key = models.ApiKey.get_by_api_key(token)\n dashboard = api_key.object\n else:\n dashboard = current_user.object\n\n user = {\n 'permissions': [],\n 'apiKey': current_user.id\n }\n\n headers = {\n 'Cache-Control': 'no-cache, no-store, max-age=0, must-revalidate'\n }\n\n response = render_template(\"public.html\",\n headless='embed' in request.args,\n user=json.dumps(user),\n seed_data=json_dumps({\n 'dashboard': serializers.public_dashboard(dashboard)\n }),\n client_config=json.dumps(settings.COMMON_CLIENT_CONFIG))\n\n return response, 200, headers\n", "path": "redash/handlers/embed.py"}]} | 1,097 | 124 |
gh_patches_debug_12620 | rasdani/github-patches | git_diff | kivy__kivy-5187 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Kivy breaks Clipboard
### Versions
* Python: 2.7.12
* OS: Windows 10
* Kivy: 1.9.2-dev0
* Kivy installation method: wheel
### Description
When pasting some data into a `TextInput`, the clipboard breaks across the system, and copying and pasting is not possible until the Kivy app is terminated. Specifically, I found the following steps to reproduce the problem:
1. Try copying a file into the `TextInput` box (as expected, nothing is pasted in)
2. Try copying some text somewhere else (does not have to be in the `TextInput`)
After step 1, nothing is copied or pasted and the Kivy application must be terminated before the clipboard starts working again.
--- END ISSUE ---
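A minimal sketch, not taken from the report above, of the failure mode it describes: once `OpenClipboard` succeeds, an early return that skips `CloseClipboard` leaves the clipboard locked for every application until the process exits. The ctypes names mirror the module below; the helper itself is illustrative and Windows-only.
```python
# Illustrative only: release the clipboard on every exit path.
import ctypes
from ctypes import wintypes

user32 = ctypes.windll.user32  # Windows-only API
user32.GetClipboardData.argtypes = [wintypes.UINT]
user32.GetClipboardData.restype = wintypes.HANDLE

CF_UNICODETEXT = 13


def get_clipboard_text():
    user32.OpenClipboard(user32.GetActiveWindow())
    try:
        handle = user32.GetClipboardData(CF_UNICODETEXT)
        if not handle:
            # e.g. a file was copied: there is no text handle to read
            return ''
        return ctypes.c_wchar_p(handle).value
    finally:
        # skipping this on the early-return path locks the clipboard system-wide
        user32.CloseClipboard()
```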
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kivy/core/clipboard/clipboard_winctypes.py`
Content:
```
1 '''
2 Clipboard windows: an implementation of the Clipboard using ctypes.
3 '''
4
5 __all__ = ('ClipboardWindows', )
6
7 from kivy.utils import platform
8 from kivy.core.clipboard import ClipboardBase
9
10 if platform != 'win':
11 raise SystemError('unsupported platform for Windows clipboard')
12
13 import ctypes
14 from ctypes import wintypes
15 user32 = ctypes.windll.user32
16 kernel32 = ctypes.windll.kernel32
17 msvcrt = ctypes.cdll.msvcrt
18 c_char_p = ctypes.c_char_p
19 c_wchar_p = ctypes.c_wchar_p
20
21
22 class ClipboardWindows(ClipboardBase):
23
24 def get(self, mimetype='text/plain'):
25 GetClipboardData = user32.GetClipboardData
26 GetClipboardData.argtypes = [wintypes.UINT]
27 GetClipboardData.restype = wintypes.HANDLE
28
29 user32.OpenClipboard(user32.GetActiveWindow())
30 # 1 is CF_TEXT
31 pcontents = GetClipboardData(13)
32 if not pcontents:
33 return ''
34 data = c_wchar_p(pcontents).value.encode(self._encoding)
35 user32.CloseClipboard()
36 return data
37
38 def put(self, text, mimetype='text/plain'):
39 text = text.decode(self._encoding) # auto converted later
40 text += u'\x00'
41
42 SetClipboardData = user32.SetClipboardData
43 SetClipboardData.argtypes = [wintypes.UINT, wintypes.HANDLE]
44 SetClipboardData.restype = wintypes.HANDLE
45
46 GlobalAlloc = kernel32.GlobalAlloc
47 GlobalAlloc.argtypes = [wintypes.UINT, ctypes.c_size_t]
48 GlobalAlloc.restype = wintypes.HGLOBAL
49
50 CF_UNICODETEXT = 13
51
52 user32.OpenClipboard(user32.GetActiveWindow())
53 user32.EmptyClipboard()
54 hCd = GlobalAlloc(0, len(text) * ctypes.sizeof(ctypes.c_wchar))
55 msvcrt.wcscpy_s(c_wchar_p(hCd), len(text), c_wchar_p(text))
56 SetClipboardData(CF_UNICODETEXT, hCd)
57 user32.CloseClipboard()
58
59 def get_types(self):
60 return ['text/plain']
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kivy/core/clipboard/clipboard_winctypes.py b/kivy/core/clipboard/clipboard_winctypes.py
--- a/kivy/core/clipboard/clipboard_winctypes.py
+++ b/kivy/core/clipboard/clipboard_winctypes.py
@@ -27,9 +27,13 @@
GetClipboardData.restype = wintypes.HANDLE
user32.OpenClipboard(user32.GetActiveWindow())
- # 1 is CF_TEXT
+ # Standard Clipboard Format "1" is "CF_TEXT"
pcontents = GetClipboardData(13)
+
+ # if someone pastes a FILE, the content is None for SCF 13
+ # and the clipboard is locked if not closed properly
if not pcontents:
+ user32.CloseClipboard()
return ''
data = c_wchar_p(pcontents).value.encode(self._encoding)
user32.CloseClipboard()
| {"golden_diff": "diff --git a/kivy/core/clipboard/clipboard_winctypes.py b/kivy/core/clipboard/clipboard_winctypes.py\n--- a/kivy/core/clipboard/clipboard_winctypes.py\n+++ b/kivy/core/clipboard/clipboard_winctypes.py\n@@ -27,9 +27,13 @@\n GetClipboardData.restype = wintypes.HANDLE\n \n user32.OpenClipboard(user32.GetActiveWindow())\n- # 1 is CF_TEXT\n+ # Standard Clipboard Format \"1\" is \"CF_TEXT\"\n pcontents = GetClipboardData(13)\n+\n+ # if someone pastes a FILE, the content is None for SCF 13\n+ # and the clipboard is locked if not closed properly\n if not pcontents:\n+ user32.CloseClipboard()\n return ''\n data = c_wchar_p(pcontents).value.encode(self._encoding)\n user32.CloseClipboard()\n", "issue": "Kivy breaks Clipboard\n### Versions\r\n\r\n* Python: 2.7.12\r\n* OS: Windows 10\r\n* Kivy: 1.9.2-dev0\r\n* Kivy installation method: wheel\r\n\r\n### Description\r\n\r\nWhen pasting some data into a `TextInput`, the clipboard breaks across the system, and copying and pasting is not possible until the Kivy app is terminated. Specifically, I found the following steps to reproduce the problem:\r\n1. Try copying a file into the `TextInput` box (nothing will paste in as expected)\r\n2. Try copying some text somewhere else (does not have to be in the `TextInput`)\r\n\r\nAfter step 1, nothing is copied or pasted and the Kivy application must be terminated before the clipboard starts working again.\n", "before_files": [{"content": "'''\nClipboard windows: an implementation of the Clipboard using ctypes.\n'''\n\n__all__ = ('ClipboardWindows', )\n\nfrom kivy.utils import platform\nfrom kivy.core.clipboard import ClipboardBase\n\nif platform != 'win':\n raise SystemError('unsupported platform for Windows clipboard')\n\nimport ctypes\nfrom ctypes import wintypes\nuser32 = ctypes.windll.user32\nkernel32 = ctypes.windll.kernel32\nmsvcrt = ctypes.cdll.msvcrt\nc_char_p = ctypes.c_char_p\nc_wchar_p = ctypes.c_wchar_p\n\n\nclass ClipboardWindows(ClipboardBase):\n\n def get(self, mimetype='text/plain'):\n GetClipboardData = user32.GetClipboardData\n GetClipboardData.argtypes = [wintypes.UINT]\n GetClipboardData.restype = wintypes.HANDLE\n\n user32.OpenClipboard(user32.GetActiveWindow())\n # 1 is CF_TEXT\n pcontents = GetClipboardData(13)\n if not pcontents:\n return ''\n data = c_wchar_p(pcontents).value.encode(self._encoding)\n user32.CloseClipboard()\n return data\n\n def put(self, text, mimetype='text/plain'):\n text = text.decode(self._encoding) # auto converted later\n text += u'\\x00'\n\n SetClipboardData = user32.SetClipboardData\n SetClipboardData.argtypes = [wintypes.UINT, wintypes.HANDLE]\n SetClipboardData.restype = wintypes.HANDLE\n\n GlobalAlloc = kernel32.GlobalAlloc\n GlobalAlloc.argtypes = [wintypes.UINT, ctypes.c_size_t]\n GlobalAlloc.restype = wintypes.HGLOBAL\n\n CF_UNICODETEXT = 13\n\n user32.OpenClipboard(user32.GetActiveWindow())\n user32.EmptyClipboard()\n hCd = GlobalAlloc(0, len(text) * ctypes.sizeof(ctypes.c_wchar))\n msvcrt.wcscpy_s(c_wchar_p(hCd), len(text), c_wchar_p(text))\n SetClipboardData(CF_UNICODETEXT, hCd)\n user32.CloseClipboard()\n\n def get_types(self):\n return ['text/plain']\n", "path": "kivy/core/clipboard/clipboard_winctypes.py"}], "after_files": [{"content": "'''\nClipboard windows: an implementation of the Clipboard using ctypes.\n'''\n\n__all__ = ('ClipboardWindows', )\n\nfrom kivy.utils import platform\nfrom kivy.core.clipboard import ClipboardBase\n\nif platform != 'win':\n raise SystemError('unsupported platform for Windows clipboard')\n\nimport 
ctypes\nfrom ctypes import wintypes\nuser32 = ctypes.windll.user32\nkernel32 = ctypes.windll.kernel32\nmsvcrt = ctypes.cdll.msvcrt\nc_char_p = ctypes.c_char_p\nc_wchar_p = ctypes.c_wchar_p\n\n\nclass ClipboardWindows(ClipboardBase):\n\n def get(self, mimetype='text/plain'):\n GetClipboardData = user32.GetClipboardData\n GetClipboardData.argtypes = [wintypes.UINT]\n GetClipboardData.restype = wintypes.HANDLE\n\n user32.OpenClipboard(user32.GetActiveWindow())\n # Standard Clipboard Format \"1\" is \"CF_TEXT\"\n pcontents = GetClipboardData(13)\n\n # if someone pastes a FILE, the content is None for SCF 13\n # and the clipboard is locked if not closed properly\n if not pcontents:\n user32.CloseClipboard()\n return ''\n data = c_wchar_p(pcontents).value.encode(self._encoding)\n user32.CloseClipboard()\n return data\n\n def put(self, text, mimetype='text/plain'):\n text = text.decode(self._encoding) # auto converted later\n text += u'\\x00'\n\n SetClipboardData = user32.SetClipboardData\n SetClipboardData.argtypes = [wintypes.UINT, wintypes.HANDLE]\n SetClipboardData.restype = wintypes.HANDLE\n\n GlobalAlloc = kernel32.GlobalAlloc\n GlobalAlloc.argtypes = [wintypes.UINT, ctypes.c_size_t]\n GlobalAlloc.restype = wintypes.HGLOBAL\n\n CF_UNICODETEXT = 13\n\n user32.OpenClipboard(user32.GetActiveWindow())\n user32.EmptyClipboard()\n hCd = GlobalAlloc(0, len(text) * ctypes.sizeof(ctypes.c_wchar))\n msvcrt.wcscpy_s(c_wchar_p(hCd), len(text), c_wchar_p(text))\n SetClipboardData(CF_UNICODETEXT, hCd)\n user32.CloseClipboard()\n\n def get_types(self):\n return ['text/plain']\n", "path": "kivy/core/clipboard/clipboard_winctypes.py"}]} | 1,026 | 204 |
gh_patches_debug_5831 | rasdani/github-patches | git_diff | sherlock-project__sherlock-139 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sites sorting
It may be a good idea to sort the sites in sites.md and data.json alphabetically. When I'm looking for sites to add, I always have to Ctrl+F in this repo or scroll through the whole file... Also, when looking at the results, it's just chaos.
--- END ISSUE ---
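A minimal sketch, assuming the same top-level layout of `data.json` as shown below, of keeping the file alphabetized by site name whenever it is rewritten; the round-trip is illustrative rather than the project's actual tooling.
```python
# Illustrative only: rewrite data.json with its top-level keys sorted.
import json

with open("data.json", "r", encoding="utf-8") as data_file:
    data = json.load(data_file)

with open("data.json", "w", encoding="utf-8") as data_file:
    data_file.write(json.dumps(data, indent=2, sort_keys=True))
```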
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `site_list.py`
Content:
```
1 """Sherlock: Supported Site Listing
2
3 This module generates the listing of supported sites.
4 """
5 import json
6
7 with open("data.json", "r", encoding="utf-8") as data_file:
8 data = json.load(data_file)
9
10 with open("sites.md", "w") as site_file:
11 site_file.write(f'## List Of Supported Sites ({len(data)} Sites In Total!)\n')
12
13 index = 1
14 for social_network in data:
15 url_main = data.get(social_network).get("urlMain")
16 site_file.write(f'{index}. [{social_network}]({url_main})\n')
17 index = index + 1
18
19 print("Finished updating supported site listing!")
20
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/site_list.py b/site_list.py
--- a/site_list.py
+++ b/site_list.py
@@ -7,6 +7,11 @@
with open("data.json", "r", encoding="utf-8") as data_file:
data = json.load(data_file)
+sorted_json_data = json.dumps(data, indent=2, sort_keys=True)
+
+with open("data.json", "w") as data_file:
+ data_file.write(sorted_json_data)
+
with open("sites.md", "w") as site_file:
site_file.write(f'## List Of Supported Sites ({len(data)} Sites In Total!)\n')
| {"golden_diff": "diff --git a/site_list.py b/site_list.py\n--- a/site_list.py\n+++ b/site_list.py\n@@ -7,6 +7,11 @@\n with open(\"data.json\", \"r\", encoding=\"utf-8\") as data_file:\n data = json.load(data_file)\n \n+sorted_json_data = json.dumps(data, indent=2, sort_keys=True)\n+\n+with open(\"data.json\", \"w\") as data_file:\n+ data_file.write(sorted_json_data)\n+\n with open(\"sites.md\", \"w\") as site_file:\n site_file.write(f'## List Of Supported Sites ({len(data)} Sites In Total!)\\n')\n", "issue": "Sites sorting\nIt may be a good idea to sort the sites in sites.md and data.json alphabetically. When I'm looking for sites to add, I always have to Ctrl+F in this repo or just scroll through the file... Also when seeing the results, it's just chaos.\n", "before_files": [{"content": "\"\"\"Sherlock: Supported Site Listing\n\nThis module generates the listing of supported sites.\n\"\"\"\nimport json\n\nwith open(\"data.json\", \"r\", encoding=\"utf-8\") as data_file:\n data = json.load(data_file)\n\nwith open(\"sites.md\", \"w\") as site_file:\n site_file.write(f'## List Of Supported Sites ({len(data)} Sites In Total!)\\n')\n\n index = 1\n for social_network in data:\n url_main = data.get(social_network).get(\"urlMain\")\n site_file.write(f'{index}. [{social_network}]({url_main})\\n')\n index = index + 1\n\nprint(\"Finished updating supported site listing!\")\n", "path": "site_list.py"}], "after_files": [{"content": "\"\"\"Sherlock: Supported Site Listing\n\nThis module generates the listing of supported sites.\n\"\"\"\nimport json\n\nwith open(\"data.json\", \"r\", encoding=\"utf-8\") as data_file:\n data = json.load(data_file)\n\nsorted_json_data = json.dumps(data, indent=2, sort_keys=True)\n\nwith open(\"data.json\", \"w\") as data_file:\n data_file.write(sorted_json_data)\n\nwith open(\"sites.md\", \"w\") as site_file:\n site_file.write(f'## List Of Supported Sites ({len(data)} Sites In Total!)\\n')\n\n index = 1\n for social_network in data:\n url_main = data.get(social_network).get(\"urlMain\")\n site_file.write(f'{index}. [{social_network}]({url_main})\\n')\n index = index + 1\n\nprint(\"Finished updating supported site listing!\")\n", "path": "site_list.py"}]} | 497 | 140 |
gh_patches_debug_7015 | rasdani/github-patches | git_diff | ibis-project__ibis-798 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ImpalaTable.describe_formatted may throw an exception
Excerpt below; will investigate soon.
``` python
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-115-87d40e40503e> in <module>()
----> 1 repr(db.csv_as_parquet.describe_formatted())
/home/wesm/code/cloudera/ibis/ibis/impala/metadata.py in __repr__(self)
295 data['partition schema'] = self.partitions
296
--> 297 pprint.pprint(data, stream=buf)
298
299 return buf.getvalue()
/home/wesm/anaconda3/lib/python3.5/pprint.py in pprint(object, stream, indent, width, depth, compact)
51 stream=stream, indent=indent, width=width, depth=depth,
52 compact=compact)
---> 53 printer.pprint(object)
54
55 def pformat(object, indent=1, width=80, depth=None, *, compact=False):
```
--- END ISSUE ---
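A hedged illustration of the normalization applied by the patch later in this record: coercing `pandas.Timestamp` values to plain `datetime` objects before they land in the metadata dict that gets pretty-printed. The sample inputs are made up.
```python
# Illustrative only: coerce pandas Timestamps to datetime, pass other values through.
import pandas as pd


def _try_timestamp(x):
    try:
        return pd.Timestamp(x).to_pydatetime()
    except (ValueError, TypeError):
        return x


print(_try_timestamp("2016-01-01 10:00:00"))  # datetime.datetime(2016, 1, 1, 10, 0)
print(_try_timestamp("not a timestamp"))      # returned unchanged
```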
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ibis/impala/metadata.py`
Content:
```
1 # Copyright 2014 Cloudera Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from six import StringIO
16 import pandas as pd
17
18
19 def parse_metadata(descr_table):
20 parser = MetadataParser(descr_table)
21 return parser.parse()
22
23
24 def _noop(tup):
25 return None
26
27
28 def _item_converter(i):
29 def _get_item(converter=None):
30 def _converter(tup):
31 result = tup[i]
32 if converter is not None:
33 result = converter(result)
34 return result
35
36 return _converter
37
38 return _get_item
39
40 _get_type = _item_converter(1)
41 _get_comment = _item_converter(2)
42
43
44 def _try_timestamp(x):
45 try:
46 return pd.Timestamp(x)
47 except (ValueError, TypeError):
48 return x
49
50
51 def _try_unix_timestamp(x):
52 try:
53 return pd.Timestamp.fromtimestamp(int(x))
54 except (ValueError, TypeError):
55 return x
56
57
58 def _try_boolean(x):
59 try:
60 x = x.lower()
61 if x in ('true', 'yes'):
62 return True
63 elif x in ('false', 'no'):
64 return False
65 return x
66 except (ValueError, TypeError):
67 return x
68
69
70 def _try_int(x):
71 try:
72 return int(x)
73 except (ValueError, TypeError):
74 return x
75
76
77 class MetadataParser(object):
78
79 """
80 A simple state-ish machine to parse the results of DESCRIBE FORMATTED
81 """
82
83 def __init__(self, table):
84 self.table = table
85 self.tuples = list(self.table.itertuples(index=False))
86
87 def _reset(self):
88 self.pos = 0
89 self.schema = None
90 self.partitions = None
91 self.info = None
92 self.storage = None
93
94 def _next_tuple(self):
95 if self.pos == len(self.tuples):
96 raise StopIteration
97
98 result = self.tuples[self.pos]
99 self.pos += 1
100 return result
101
102 def parse(self):
103 self._reset()
104 self._parse()
105
106 return TableMetadata(self.schema, self.info, self.storage,
107 partitions=self.partitions)
108
109 def _parse(self):
110 self.schema = self._parse_schema()
111
112 next_section = self._next_tuple()
113 if 'partition' in next_section[0].lower():
114 self._parse_partitions()
115 else:
116 self._parse_info()
117
118 def _parse_partitions(self):
119 self.partitions = self._parse_schema()
120
121 next_section = self._next_tuple()
122 if 'table information' not in next_section[0].lower():
123 raise ValueError('Table information not present')
124
125 self._parse_info()
126
127 def _parse_schema(self):
128 tup = self._next_tuple()
129 if 'col_name' not in tup[0]:
130 raise ValueError('DESCRIBE FORMATTED did not return '
131 'the expected results: {0}'
132 .format(tup))
133 self._next_tuple()
134
135 # Use for both main schema and partition schema (if any)
136 schema = []
137 while True:
138 tup = self._next_tuple()
139 if tup[0].strip() == '':
140 break
141 schema.append((tup[0], tup[1]))
142
143 return schema
144
145 def _parse_info(self):
146 self.info = {}
147 while True:
148 tup = self._next_tuple()
149 orig_key = tup[0].strip(':')
150 key = _clean_param_name(tup[0])
151
152 if key == '' or key.startswith('#'):
153 # section is done
154 break
155
156 if key == 'table parameters':
157 self._parse_table_parameters()
158 elif key in self._info_cleaners:
159 result = self._info_cleaners[key](tup)
160 self.info[orig_key] = result
161 else:
162 self.info[orig_key] = tup[1]
163
164 if 'storage information' not in key:
165 raise ValueError('Storage information not present')
166
167 self._parse_storage_info()
168
169 _info_cleaners = {
170 'database': _get_type(),
171 'owner': _get_type(),
172 'createtime': _get_type(_try_timestamp),
173 'lastaccesstime': _get_type(_try_timestamp),
174 'protect mode': _get_type(),
175 'retention': _get_type(_try_int),
176 'location': _get_type(),
177 'table type': _get_type()
178 }
179
180 def _parse_table_parameters(self):
181 params = self._parse_nested_params(self._table_param_cleaners)
182 self.info['Table Parameters'] = params
183
184 _table_param_cleaners = {
185 'external': _try_boolean,
186 'column_stats_accurate': _try_boolean,
187 'numfiles': _try_int,
188 'totalsize': _try_int,
189 'stats_generated_via_stats_task': _try_boolean,
190 'numrows': _try_int,
191 'transient_lastddltime': _try_unix_timestamp,
192 }
193
194 def _parse_storage_info(self):
195 self.storage = {}
196 while True:
197 # end of the road
198 try:
199 tup = self._next_tuple()
200 except StopIteration:
201 break
202
203 orig_key = tup[0].strip(':')
204 key = _clean_param_name(tup[0])
205
206 if key == '' or key.startswith('#'):
207 # section is done
208 break
209
210 if key == 'storage desc params':
211 self._parse_storage_desc_params()
212 elif key in self._storage_cleaners:
213 result = self._storage_cleaners[key](tup)
214 self.storage[orig_key] = result
215 else:
216 self.storage[orig_key] = tup[1]
217
218 _storage_cleaners = {
219 'compressed': _get_type(_try_boolean),
220 'num buckets': _get_type(_try_int),
221 }
222
223 def _parse_storage_desc_params(self):
224 params = self._parse_nested_params(self._storage_param_cleaners)
225 self.storage['Desc Params'] = params
226
227 _storage_param_cleaners = {}
228
229 def _parse_nested_params(self, cleaners):
230 params = {}
231 while True:
232 try:
233 tup = self._next_tuple()
234 except StopIteration:
235 break
236 if pd.isnull(tup[1]):
237 break
238
239 key, value = tup[1:]
240
241 if key.lower() in cleaners:
242 cleaner = cleaners[key.lower()]
243 value = cleaner(value)
244 params[key] = value
245
246 return params
247
248
249 def _clean_param_name(x):
250 return x.strip().strip(':').lower()
251
252
253 def _get_meta(attr, key):
254 @property
255 def f(self):
256 data = getattr(self, attr)
257 if isinstance(key, list):
258 result = data
259 for k in key:
260 if k not in result:
261 raise KeyError(k)
262 result = result[k]
263 return result
264 else:
265 return data[key]
266 return f
267
268
269 class TableMetadata(object):
270
271 """
272 Container for the parsed and wrangled results of DESCRIBE FORMATTED for
273 easier Ibis use (and testing).
274 """
275 def __init__(self, schema, info, storage, partitions=None):
276 self.schema = schema
277 self.info = info
278 self.storage = storage
279 self.partitions = partitions
280
281 def __repr__(self):
282 import pprint
283
284 # Quick and dirty for now
285 buf = StringIO()
286 buf.write(str(type(self)))
287 buf.write('\n')
288
289 data = {
290 'schema': self.schema,
291 'info': self.info,
292 'storage info': self.storage
293 }
294 if self.partitions is not None:
295 data['partition schema'] = self.partitions
296
297 pprint.pprint(data, stream=buf)
298
299 return buf.getvalue()
300
301 @property
302 def is_partitioned(self):
303 return self.partitions is not None
304
305 create_time = _get_meta('info', 'CreateTime')
306 location = _get_meta('info', 'Location')
307 owner = _get_meta('info', 'Owner')
308 num_rows = _get_meta('info', ['Table Parameters', 'numRows'])
309 hive_format = _get_meta('storage', 'InputFormat')
310
311 tbl_properties = _get_meta('info', 'Table Parameters')
312 serde_properties = _get_meta('storage', 'Desc Params')
313
314
315 class TableInfo(object):
316 pass
317
318
319 class TableStorageInfo(object):
320 pass
321
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ibis/impala/metadata.py b/ibis/impala/metadata.py
--- a/ibis/impala/metadata.py
+++ b/ibis/impala/metadata.py
@@ -43,14 +43,16 @@
def _try_timestamp(x):
try:
- return pd.Timestamp(x)
+ ts = pd.Timestamp(x)
+ return ts.to_pydatetime()
except (ValueError, TypeError):
return x
def _try_unix_timestamp(x):
try:
- return pd.Timestamp.fromtimestamp(int(x))
+ ts = pd.Timestamp.fromtimestamp(int(x))
+ return ts.to_pydatetime()
except (ValueError, TypeError):
return x
| {"golden_diff": "diff --git a/ibis/impala/metadata.py b/ibis/impala/metadata.py\n--- a/ibis/impala/metadata.py\n+++ b/ibis/impala/metadata.py\n@@ -43,14 +43,16 @@\n \n def _try_timestamp(x):\n try:\n- return pd.Timestamp(x)\n+ ts = pd.Timestamp(x)\n+ return ts.to_pydatetime()\n except (ValueError, TypeError):\n return x\n \n \n def _try_unix_timestamp(x):\n try:\n- return pd.Timestamp.fromtimestamp(int(x))\n+ ts = pd.Timestamp.fromtimestamp(int(x))\n+ return ts.to_pydatetime()\n except (ValueError, TypeError):\n return x\n", "issue": "ImpalaTable.describe_formatted may throw an exception\nexcerpt. Will investigate soon\n\n``` python\n---------------------------------------------------------------------------\nTypeError Traceback (most recent call last)\n<ipython-input-115-87d40e40503e> in <module>()\n----> 1 repr(db.csv_as_parquet.describe_formatted())\n\n/home/wesm/code/cloudera/ibis/ibis/impala/metadata.py in __repr__(self)\n 295 data['partition schema'] = self.partitions\n 296 \n--> 297 pprint.pprint(data, stream=buf)\n 298 \n 299 return buf.getvalue()\n\n/home/wesm/anaconda3/lib/python3.5/pprint.py in pprint(object, stream, indent, width, depth, compact)\n 51 stream=stream, indent=indent, width=width, depth=depth,\n 52 compact=compact)\n---> 53 printer.pprint(object)\n 54 \n 55 def pformat(object, indent=1, width=80, depth=None, *, compact=False):\n```\n\n", "before_files": [{"content": "# Copyright 2014 Cloudera Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom six import StringIO\nimport pandas as pd\n\n\ndef parse_metadata(descr_table):\n parser = MetadataParser(descr_table)\n return parser.parse()\n\n\ndef _noop(tup):\n return None\n\n\ndef _item_converter(i):\n def _get_item(converter=None):\n def _converter(tup):\n result = tup[i]\n if converter is not None:\n result = converter(result)\n return result\n\n return _converter\n\n return _get_item\n\n_get_type = _item_converter(1)\n_get_comment = _item_converter(2)\n\n\ndef _try_timestamp(x):\n try:\n return pd.Timestamp(x)\n except (ValueError, TypeError):\n return x\n\n\ndef _try_unix_timestamp(x):\n try:\n return pd.Timestamp.fromtimestamp(int(x))\n except (ValueError, TypeError):\n return x\n\n\ndef _try_boolean(x):\n try:\n x = x.lower()\n if x in ('true', 'yes'):\n return True\n elif x in ('false', 'no'):\n return False\n return x\n except (ValueError, TypeError):\n return x\n\n\ndef _try_int(x):\n try:\n return int(x)\n except (ValueError, TypeError):\n return x\n\n\nclass MetadataParser(object):\n\n \"\"\"\n A simple state-ish machine to parse the results of DESCRIBE FORMATTED\n \"\"\"\n\n def __init__(self, table):\n self.table = table\n self.tuples = list(self.table.itertuples(index=False))\n\n def _reset(self):\n self.pos = 0\n self.schema = None\n self.partitions = None\n self.info = None\n self.storage = None\n\n def _next_tuple(self):\n if self.pos == len(self.tuples):\n raise StopIteration\n\n result = self.tuples[self.pos]\n self.pos += 1\n return result\n\n def parse(self):\n self._reset()\n 
self._parse()\n\n return TableMetadata(self.schema, self.info, self.storage,\n partitions=self.partitions)\n\n def _parse(self):\n self.schema = self._parse_schema()\n\n next_section = self._next_tuple()\n if 'partition' in next_section[0].lower():\n self._parse_partitions()\n else:\n self._parse_info()\n\n def _parse_partitions(self):\n self.partitions = self._parse_schema()\n\n next_section = self._next_tuple()\n if 'table information' not in next_section[0].lower():\n raise ValueError('Table information not present')\n\n self._parse_info()\n\n def _parse_schema(self):\n tup = self._next_tuple()\n if 'col_name' not in tup[0]:\n raise ValueError('DESCRIBE FORMATTED did not return '\n 'the expected results: {0}'\n .format(tup))\n self._next_tuple()\n\n # Use for both main schema and partition schema (if any)\n schema = []\n while True:\n tup = self._next_tuple()\n if tup[0].strip() == '':\n break\n schema.append((tup[0], tup[1]))\n\n return schema\n\n def _parse_info(self):\n self.info = {}\n while True:\n tup = self._next_tuple()\n orig_key = tup[0].strip(':')\n key = _clean_param_name(tup[0])\n\n if key == '' or key.startswith('#'):\n # section is done\n break\n\n if key == 'table parameters':\n self._parse_table_parameters()\n elif key in self._info_cleaners:\n result = self._info_cleaners[key](tup)\n self.info[orig_key] = result\n else:\n self.info[orig_key] = tup[1]\n\n if 'storage information' not in key:\n raise ValueError('Storage information not present')\n\n self._parse_storage_info()\n\n _info_cleaners = {\n 'database': _get_type(),\n 'owner': _get_type(),\n 'createtime': _get_type(_try_timestamp),\n 'lastaccesstime': _get_type(_try_timestamp),\n 'protect mode': _get_type(),\n 'retention': _get_type(_try_int),\n 'location': _get_type(),\n 'table type': _get_type()\n }\n\n def _parse_table_parameters(self):\n params = self._parse_nested_params(self._table_param_cleaners)\n self.info['Table Parameters'] = params\n\n _table_param_cleaners = {\n 'external': _try_boolean,\n 'column_stats_accurate': _try_boolean,\n 'numfiles': _try_int,\n 'totalsize': _try_int,\n 'stats_generated_via_stats_task': _try_boolean,\n 'numrows': _try_int,\n 'transient_lastddltime': _try_unix_timestamp,\n }\n\n def _parse_storage_info(self):\n self.storage = {}\n while True:\n # end of the road\n try:\n tup = self._next_tuple()\n except StopIteration:\n break\n\n orig_key = tup[0].strip(':')\n key = _clean_param_name(tup[0])\n\n if key == '' or key.startswith('#'):\n # section is done\n break\n\n if key == 'storage desc params':\n self._parse_storage_desc_params()\n elif key in self._storage_cleaners:\n result = self._storage_cleaners[key](tup)\n self.storage[orig_key] = result\n else:\n self.storage[orig_key] = tup[1]\n\n _storage_cleaners = {\n 'compressed': _get_type(_try_boolean),\n 'num buckets': _get_type(_try_int),\n }\n\n def _parse_storage_desc_params(self):\n params = self._parse_nested_params(self._storage_param_cleaners)\n self.storage['Desc Params'] = params\n\n _storage_param_cleaners = {}\n\n def _parse_nested_params(self, cleaners):\n params = {}\n while True:\n try:\n tup = self._next_tuple()\n except StopIteration:\n break\n if pd.isnull(tup[1]):\n break\n\n key, value = tup[1:]\n\n if key.lower() in cleaners:\n cleaner = cleaners[key.lower()]\n value = cleaner(value)\n params[key] = value\n\n return params\n\n\ndef _clean_param_name(x):\n return x.strip().strip(':').lower()\n\n\ndef _get_meta(attr, key):\n @property\n def f(self):\n data = getattr(self, attr)\n if isinstance(key, list):\n 
result = data\n for k in key:\n if k not in result:\n raise KeyError(k)\n result = result[k]\n return result\n else:\n return data[key]\n return f\n\n\nclass TableMetadata(object):\n\n \"\"\"\n Container for the parsed and wrangled results of DESCRIBE FORMATTED for\n easier Ibis use (and testing).\n \"\"\"\n def __init__(self, schema, info, storage, partitions=None):\n self.schema = schema\n self.info = info\n self.storage = storage\n self.partitions = partitions\n\n def __repr__(self):\n import pprint\n\n # Quick and dirty for now\n buf = StringIO()\n buf.write(str(type(self)))\n buf.write('\\n')\n\n data = {\n 'schema': self.schema,\n 'info': self.info,\n 'storage info': self.storage\n }\n if self.partitions is not None:\n data['partition schema'] = self.partitions\n\n pprint.pprint(data, stream=buf)\n\n return buf.getvalue()\n\n @property\n def is_partitioned(self):\n return self.partitions is not None\n\n create_time = _get_meta('info', 'CreateTime')\n location = _get_meta('info', 'Location')\n owner = _get_meta('info', 'Owner')\n num_rows = _get_meta('info', ['Table Parameters', 'numRows'])\n hive_format = _get_meta('storage', 'InputFormat')\n\n tbl_properties = _get_meta('info', 'Table Parameters')\n serde_properties = _get_meta('storage', 'Desc Params')\n\n\nclass TableInfo(object):\n pass\n\n\nclass TableStorageInfo(object):\n pass\n", "path": "ibis/impala/metadata.py"}], "after_files": [{"content": "# Copyright 2014 Cloudera Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom six import StringIO\nimport pandas as pd\n\n\ndef parse_metadata(descr_table):\n parser = MetadataParser(descr_table)\n return parser.parse()\n\n\ndef _noop(tup):\n return None\n\n\ndef _item_converter(i):\n def _get_item(converter=None):\n def _converter(tup):\n result = tup[i]\n if converter is not None:\n result = converter(result)\n return result\n\n return _converter\n\n return _get_item\n\n_get_type = _item_converter(1)\n_get_comment = _item_converter(2)\n\n\ndef _try_timestamp(x):\n try:\n ts = pd.Timestamp(x)\n return ts.to_pydatetime()\n except (ValueError, TypeError):\n return x\n\n\ndef _try_unix_timestamp(x):\n try:\n ts = pd.Timestamp.fromtimestamp(int(x))\n return ts.to_pydatetime()\n except (ValueError, TypeError):\n return x\n\n\ndef _try_boolean(x):\n try:\n x = x.lower()\n if x in ('true', 'yes'):\n return True\n elif x in ('false', 'no'):\n return False\n return x\n except (ValueError, TypeError):\n return x\n\n\ndef _try_int(x):\n try:\n return int(x)\n except (ValueError, TypeError):\n return x\n\n\nclass MetadataParser(object):\n\n \"\"\"\n A simple state-ish machine to parse the results of DESCRIBE FORMATTED\n \"\"\"\n\n def __init__(self, table):\n self.table = table\n self.tuples = list(self.table.itertuples(index=False))\n\n def _reset(self):\n self.pos = 0\n self.schema = None\n self.partitions = None\n self.info = None\n self.storage = None\n\n def _next_tuple(self):\n if self.pos == len(self.tuples):\n raise StopIteration\n\n result = self.tuples[self.pos]\n self.pos += 
1\n return result\n\n def parse(self):\n self._reset()\n self._parse()\n\n return TableMetadata(self.schema, self.info, self.storage,\n partitions=self.partitions)\n\n def _parse(self):\n self.schema = self._parse_schema()\n\n next_section = self._next_tuple()\n if 'partition' in next_section[0].lower():\n self._parse_partitions()\n else:\n self._parse_info()\n\n def _parse_partitions(self):\n self.partitions = self._parse_schema()\n\n next_section = self._next_tuple()\n if 'table information' not in next_section[0].lower():\n raise ValueError('Table information not present')\n\n self._parse_info()\n\n def _parse_schema(self):\n tup = self._next_tuple()\n if 'col_name' not in tup[0]:\n raise ValueError('DESCRIBE FORMATTED did not return '\n 'the expected results: {0}'\n .format(tup))\n self._next_tuple()\n\n # Use for both main schema and partition schema (if any)\n schema = []\n while True:\n tup = self._next_tuple()\n if tup[0].strip() == '':\n break\n schema.append((tup[0], tup[1]))\n\n return schema\n\n def _parse_info(self):\n self.info = {}\n while True:\n tup = self._next_tuple()\n orig_key = tup[0].strip(':')\n key = _clean_param_name(tup[0])\n\n if key == '' or key.startswith('#'):\n # section is done\n break\n\n if key == 'table parameters':\n self._parse_table_parameters()\n elif key in self._info_cleaners:\n result = self._info_cleaners[key](tup)\n self.info[orig_key] = result\n else:\n self.info[orig_key] = tup[1]\n\n if 'storage information' not in key:\n raise ValueError('Storage information not present')\n\n self._parse_storage_info()\n\n _info_cleaners = {\n 'database': _get_type(),\n 'owner': _get_type(),\n 'createtime': _get_type(_try_timestamp),\n 'lastaccesstime': _get_type(_try_timestamp),\n 'protect mode': _get_type(),\n 'retention': _get_type(_try_int),\n 'location': _get_type(),\n 'table type': _get_type()\n }\n\n def _parse_table_parameters(self):\n params = self._parse_nested_params(self._table_param_cleaners)\n self.info['Table Parameters'] = params\n\n _table_param_cleaners = {\n 'external': _try_boolean,\n 'column_stats_accurate': _try_boolean,\n 'numfiles': _try_int,\n 'totalsize': _try_int,\n 'stats_generated_via_stats_task': _try_boolean,\n 'numrows': _try_int,\n 'transient_lastddltime': _try_unix_timestamp,\n }\n\n def _parse_storage_info(self):\n self.storage = {}\n while True:\n # end of the road\n try:\n tup = self._next_tuple()\n except StopIteration:\n break\n\n orig_key = tup[0].strip(':')\n key = _clean_param_name(tup[0])\n\n if key == '' or key.startswith('#'):\n # section is done\n break\n\n if key == 'storage desc params':\n self._parse_storage_desc_params()\n elif key in self._storage_cleaners:\n result = self._storage_cleaners[key](tup)\n self.storage[orig_key] = result\n else:\n self.storage[orig_key] = tup[1]\n\n _storage_cleaners = {\n 'compressed': _get_type(_try_boolean),\n 'num buckets': _get_type(_try_int),\n }\n\n def _parse_storage_desc_params(self):\n params = self._parse_nested_params(self._storage_param_cleaners)\n self.storage['Desc Params'] = params\n\n _storage_param_cleaners = {}\n\n def _parse_nested_params(self, cleaners):\n params = {}\n while True:\n try:\n tup = self._next_tuple()\n except StopIteration:\n break\n if pd.isnull(tup[1]):\n break\n\n key, value = tup[1:]\n\n if key.lower() in cleaners:\n cleaner = cleaners[key.lower()]\n value = cleaner(value)\n params[key] = value\n\n return params\n\n\ndef _clean_param_name(x):\n return x.strip().strip(':').lower()\n\n\ndef _get_meta(attr, key):\n @property\n def f(self):\n 
data = getattr(self, attr)\n if isinstance(key, list):\n result = data\n for k in key:\n if k not in result:\n raise KeyError(k)\n result = result[k]\n return result\n else:\n return data[key]\n return f\n\n\nclass TableMetadata(object):\n\n \"\"\"\n Container for the parsed and wrangled results of DESCRIBE FORMATTED for\n easier Ibis use (and testing).\n \"\"\"\n def __init__(self, schema, info, storage, partitions=None):\n self.schema = schema\n self.info = info\n self.storage = storage\n self.partitions = partitions\n\n def __repr__(self):\n import pprint\n\n # Quick and dirty for now\n buf = StringIO()\n buf.write(str(type(self)))\n buf.write('\\n')\n\n data = {\n 'schema': self.schema,\n 'info': self.info,\n 'storage info': self.storage\n }\n if self.partitions is not None:\n data['partition schema'] = self.partitions\n\n pprint.pprint(data, stream=buf)\n\n return buf.getvalue()\n\n @property\n def is_partitioned(self):\n return self.partitions is not None\n\n create_time = _get_meta('info', 'CreateTime')\n location = _get_meta('info', 'Location')\n owner = _get_meta('info', 'Owner')\n num_rows = _get_meta('info', ['Table Parameters', 'numRows'])\n hive_format = _get_meta('storage', 'InputFormat')\n\n tbl_properties = _get_meta('info', 'Table Parameters')\n serde_properties = _get_meta('storage', 'Desc Params')\n\n\nclass TableInfo(object):\n pass\n\n\nclass TableStorageInfo(object):\n pass\n", "path": "ibis/impala/metadata.py"}]} | 3,312 | 159 |
gh_patches_debug_9255 | rasdani/github-patches | git_diff | encode__httpx-2999 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ASGI `raw_path` scope key should not include the query string portion.
I ran into a bug where I had written code that assumed `raw_path` as provided by Uvicorn would include the query string, when it turns out it does not.
My tests didn't catch this because they were exercising the code using HTTPX ASGI emulation, and it turns out HTTPX thinks that `raw_path` DOES include the query string.
In Uvicorn: https://github.com/encode/uvicorn/blob/93bb8d3879808ae376b57e3721cc227fce2c27c1/uvicorn/protocols/http/h11_impl.py#L207
```python
raw_path, _, query_string = event.target.partition(b"?")
```
But in HTTPX: https://github.com/encode/httpx/blob/9415af643f23600403740baad0a466edc5cdbec1/httpx/_urls.py#L277-L292
I'm pretty confident HTTPX is incorrect about this. The ASGI spec (coincidentally the one bit of it I contributed directly to) says: https://asgi.readthedocs.io/en/latest/specs/www.html#http-connection-scope
> `raw_path` (*byte string*) -- The original HTTP path component unmodified from the bytes that were received by the web server.
On reading it now I realize this is a little ambiguous.
Daphne (the closest we have to a reference implementation of ASGI) backs up the idea that `raw_path` and `path` should be almost identical except for their encoding: https://github.com/django/daphne/blob/e49c39a4e5fac8ec170dd653641a9e90844fd3f1/daphne/ws_protocol.py#L77C1-L78
```python
"path": unquote(self.path.decode("ascii")),
"raw_path": self.path,
```
--- END ISSUE ---
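A small self-contained check of the convention the report argues for: `raw_path` carries only the path bytes, with the query string split off separately, mirroring the Uvicorn line quoted above. The example target is made up.
```python
# Illustrative only: raw_path excludes the query string.
target = b"/submount/search?q=cats&page=2"

raw_path, _, query_string = target.partition(b"?")

assert raw_path == b"/submount/search"
assert query_string == b"q=cats&page=2"
```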
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `httpx/_transports/asgi.py`
Content:
```
1 import typing
2
3 import sniffio
4
5 from .._models import Request, Response
6 from .._types import AsyncByteStream
7 from .base import AsyncBaseTransport
8
9 if typing.TYPE_CHECKING: # pragma: no cover
10 import asyncio
11
12 import trio
13
14 Event = typing.Union[asyncio.Event, trio.Event]
15
16
17 _Message = typing.Dict[str, typing.Any]
18 _Receive = typing.Callable[[], typing.Awaitable[_Message]]
19 _Send = typing.Callable[
20 [typing.Dict[str, typing.Any]], typing.Coroutine[None, None, None]
21 ]
22 _ASGIApp = typing.Callable[
23 [typing.Dict[str, typing.Any], _Receive, _Send], typing.Coroutine[None, None, None]
24 ]
25
26
27 def create_event() -> "Event":
28 if sniffio.current_async_library() == "trio":
29 import trio
30
31 return trio.Event()
32 else:
33 import asyncio
34
35 return asyncio.Event()
36
37
38 class ASGIResponseStream(AsyncByteStream):
39 def __init__(self, body: typing.List[bytes]) -> None:
40 self._body = body
41
42 async def __aiter__(self) -> typing.AsyncIterator[bytes]:
43 yield b"".join(self._body)
44
45
46 class ASGITransport(AsyncBaseTransport):
47 """
48 A custom AsyncTransport that handles sending requests directly to an ASGI app.
49 The simplest way to use this functionality is to use the `app` argument.
50
51 ```
52 client = httpx.AsyncClient(app=app)
53 ```
54
55 Alternatively, you can setup the transport instance explicitly.
56 This allows you to include any additional configuration arguments specific
57 to the ASGITransport class:
58
59 ```
60 transport = httpx.ASGITransport(
61 app=app,
62 root_path="/submount",
63 client=("1.2.3.4", 123)
64 )
65 client = httpx.AsyncClient(transport=transport)
66 ```
67
68 Arguments:
69
70 * `app` - The ASGI application.
71 * `raise_app_exceptions` - Boolean indicating if exceptions in the application
72 should be raised. Default to `True`. Can be set to `False` for use cases
73 such as testing the content of a client 500 response.
74 * `root_path` - The root path on which the ASGI application should be mounted.
75 * `client` - A two-tuple indicating the client IP and port of incoming requests.
76 ```
77 """
78
79 def __init__(
80 self,
81 app: _ASGIApp,
82 raise_app_exceptions: bool = True,
83 root_path: str = "",
84 client: typing.Tuple[str, int] = ("127.0.0.1", 123),
85 ) -> None:
86 self.app = app
87 self.raise_app_exceptions = raise_app_exceptions
88 self.root_path = root_path
89 self.client = client
90
91 async def handle_async_request(
92 self,
93 request: Request,
94 ) -> Response:
95 assert isinstance(request.stream, AsyncByteStream)
96
97 # ASGI scope.
98 scope = {
99 "type": "http",
100 "asgi": {"version": "3.0"},
101 "http_version": "1.1",
102 "method": request.method,
103 "headers": [(k.lower(), v) for (k, v) in request.headers.raw],
104 "scheme": request.url.scheme,
105 "path": request.url.path,
106 "raw_path": request.url.raw_path,
107 "query_string": request.url.query,
108 "server": (request.url.host, request.url.port),
109 "client": self.client,
110 "root_path": self.root_path,
111 }
112
113 # Request.
114 request_body_chunks = request.stream.__aiter__()
115 request_complete = False
116
117 # Response.
118 status_code = None
119 response_headers = None
120 body_parts = []
121 response_started = False
122 response_complete = create_event()
123
124 # ASGI callables.
125
126 async def receive() -> typing.Dict[str, typing.Any]:
127 nonlocal request_complete
128
129 if request_complete:
130 await response_complete.wait()
131 return {"type": "http.disconnect"}
132
133 try:
134 body = await request_body_chunks.__anext__()
135 except StopAsyncIteration:
136 request_complete = True
137 return {"type": "http.request", "body": b"", "more_body": False}
138 return {"type": "http.request", "body": body, "more_body": True}
139
140 async def send(message: typing.Dict[str, typing.Any]) -> None:
141 nonlocal status_code, response_headers, response_started
142
143 if message["type"] == "http.response.start":
144 assert not response_started
145
146 status_code = message["status"]
147 response_headers = message.get("headers", [])
148 response_started = True
149
150 elif message["type"] == "http.response.body":
151 assert not response_complete.is_set()
152 body = message.get("body", b"")
153 more_body = message.get("more_body", False)
154
155 if body and request.method != "HEAD":
156 body_parts.append(body)
157
158 if not more_body:
159 response_complete.set()
160
161 try:
162 await self.app(scope, receive, send)
163 except Exception: # noqa: PIE-786
164 if self.raise_app_exceptions:
165 raise
166
167 response_complete.set()
168 if status_code is None:
169 status_code = 500
170 if response_headers is None:
171 response_headers = {}
172
173 assert response_complete.is_set()
174 assert status_code is not None
175 assert response_headers is not None
176
177 stream = ASGIResponseStream(body_parts)
178
179 return Response(status_code, headers=response_headers, stream=stream)
180
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/httpx/_transports/asgi.py b/httpx/_transports/asgi.py
--- a/httpx/_transports/asgi.py
+++ b/httpx/_transports/asgi.py
@@ -103,7 +103,7 @@
"headers": [(k.lower(), v) for (k, v) in request.headers.raw],
"scheme": request.url.scheme,
"path": request.url.path,
- "raw_path": request.url.raw_path,
+ "raw_path": request.url.raw_path.split(b"?")[0],
"query_string": request.url.query,
"server": (request.url.host, request.url.port),
"client": self.client,
| {"golden_diff": "diff --git a/httpx/_transports/asgi.py b/httpx/_transports/asgi.py\n--- a/httpx/_transports/asgi.py\n+++ b/httpx/_transports/asgi.py\n@@ -103,7 +103,7 @@\n \"headers\": [(k.lower(), v) for (k, v) in request.headers.raw],\n \"scheme\": request.url.scheme,\n \"path\": request.url.path,\n- \"raw_path\": request.url.raw_path,\n+ \"raw_path\": request.url.raw_path.split(b\"?\")[0],\n \"query_string\": request.url.query,\n \"server\": (request.url.host, request.url.port),\n \"client\": self.client,\n", "issue": "ASGI `raw_path` scope key should not include the query string portion.\nI ran into a bug where I had written code that assumed `raw_path` as provided by Uvicorn would include the query string, where it turns out not to.\r\n\r\nMy tests didn't catch this because they were exercising the code using HTTPX ASGI emulation, and it turns out HTTPX thinks that `raw_path` DOES include the query string.\r\n\r\nIn Uvicorn: https://github.com/encode/uvicorn/blob/93bb8d3879808ae376b57e3721cc227fce2c27c1/uvicorn/protocols/http/h11_impl.py#L207\r\n```python\r\n raw_path, _, query_string = event.target.partition(b\"?\")\r\n```\r\nBut in HTTPX: https://github.com/encode/httpx/blob/9415af643f23600403740baad0a466edc5cdbec1/httpx/_urls.py#L277-L292\r\n\r\nI'm pretty confident HTTPX is incorrect about this. The ASGI spec (coincidentally the one bit of it I contributed directly to) says: https://asgi.readthedocs.io/en/latest/specs/www.html#http-connection-scope\r\n\r\n> `raw_path` (*byte string*) -- The original HTTP path component unmodified from the bytes that were received by the web server.\r\n\r\nOn reading it now I realize this is a little ambiguous.\r\n\r\nDaphne (the closest we have to a reference implementation of ASGI) backs up the idea that `raw_path` and `path` should be almost identical except for their encoding: https://github.com/django/daphne/blob/e49c39a4e5fac8ec170dd653641a9e90844fd3f1/daphne/ws_protocol.py#L77C1-L78\r\n\r\n```python\r\n \"path\": unquote(self.path.decode(\"ascii\")),\r\n \"raw_path\": self.path,\r\n```\n", "before_files": [{"content": "import typing\n\nimport sniffio\n\nfrom .._models import Request, Response\nfrom .._types import AsyncByteStream\nfrom .base import AsyncBaseTransport\n\nif typing.TYPE_CHECKING: # pragma: no cover\n import asyncio\n\n import trio\n\n Event = typing.Union[asyncio.Event, trio.Event]\n\n\n_Message = typing.Dict[str, typing.Any]\n_Receive = typing.Callable[[], typing.Awaitable[_Message]]\n_Send = typing.Callable[\n [typing.Dict[str, typing.Any]], typing.Coroutine[None, None, None]\n]\n_ASGIApp = typing.Callable[\n [typing.Dict[str, typing.Any], _Receive, _Send], typing.Coroutine[None, None, None]\n]\n\n\ndef create_event() -> \"Event\":\n if sniffio.current_async_library() == \"trio\":\n import trio\n\n return trio.Event()\n else:\n import asyncio\n\n return asyncio.Event()\n\n\nclass ASGIResponseStream(AsyncByteStream):\n def __init__(self, body: typing.List[bytes]) -> None:\n self._body = body\n\n async def __aiter__(self) -> typing.AsyncIterator[bytes]:\n yield b\"\".join(self._body)\n\n\nclass ASGITransport(AsyncBaseTransport):\n \"\"\"\n A custom AsyncTransport that handles sending requests directly to an ASGI app.\n The simplest way to use this functionality is to use the `app` argument.\n\n ```\n client = httpx.AsyncClient(app=app)\n ```\n\n Alternatively, you can setup the transport instance explicitly.\n This allows you to include any additional configuration arguments specific\n to the ASGITransport class:\n\n ```\n 
transport = httpx.ASGITransport(\n app=app,\n root_path=\"/submount\",\n client=(\"1.2.3.4\", 123)\n )\n client = httpx.AsyncClient(transport=transport)\n ```\n\n Arguments:\n\n * `app` - The ASGI application.\n * `raise_app_exceptions` - Boolean indicating if exceptions in the application\n should be raised. Default to `True`. Can be set to `False` for use cases\n such as testing the content of a client 500 response.\n * `root_path` - The root path on which the ASGI application should be mounted.\n * `client` - A two-tuple indicating the client IP and port of incoming requests.\n ```\n \"\"\"\n\n def __init__(\n self,\n app: _ASGIApp,\n raise_app_exceptions: bool = True,\n root_path: str = \"\",\n client: typing.Tuple[str, int] = (\"127.0.0.1\", 123),\n ) -> None:\n self.app = app\n self.raise_app_exceptions = raise_app_exceptions\n self.root_path = root_path\n self.client = client\n\n async def handle_async_request(\n self,\n request: Request,\n ) -> Response:\n assert isinstance(request.stream, AsyncByteStream)\n\n # ASGI scope.\n scope = {\n \"type\": \"http\",\n \"asgi\": {\"version\": \"3.0\"},\n \"http_version\": \"1.1\",\n \"method\": request.method,\n \"headers\": [(k.lower(), v) for (k, v) in request.headers.raw],\n \"scheme\": request.url.scheme,\n \"path\": request.url.path,\n \"raw_path\": request.url.raw_path,\n \"query_string\": request.url.query,\n \"server\": (request.url.host, request.url.port),\n \"client\": self.client,\n \"root_path\": self.root_path,\n }\n\n # Request.\n request_body_chunks = request.stream.__aiter__()\n request_complete = False\n\n # Response.\n status_code = None\n response_headers = None\n body_parts = []\n response_started = False\n response_complete = create_event()\n\n # ASGI callables.\n\n async def receive() -> typing.Dict[str, typing.Any]:\n nonlocal request_complete\n\n if request_complete:\n await response_complete.wait()\n return {\"type\": \"http.disconnect\"}\n\n try:\n body = await request_body_chunks.__anext__()\n except StopAsyncIteration:\n request_complete = True\n return {\"type\": \"http.request\", \"body\": b\"\", \"more_body\": False}\n return {\"type\": \"http.request\", \"body\": body, \"more_body\": True}\n\n async def send(message: typing.Dict[str, typing.Any]) -> None:\n nonlocal status_code, response_headers, response_started\n\n if message[\"type\"] == \"http.response.start\":\n assert not response_started\n\n status_code = message[\"status\"]\n response_headers = message.get(\"headers\", [])\n response_started = True\n\n elif message[\"type\"] == \"http.response.body\":\n assert not response_complete.is_set()\n body = message.get(\"body\", b\"\")\n more_body = message.get(\"more_body\", False)\n\n if body and request.method != \"HEAD\":\n body_parts.append(body)\n\n if not more_body:\n response_complete.set()\n\n try:\n await self.app(scope, receive, send)\n except Exception: # noqa: PIE-786\n if self.raise_app_exceptions:\n raise\n\n response_complete.set()\n if status_code is None:\n status_code = 500\n if response_headers is None:\n response_headers = {}\n\n assert response_complete.is_set()\n assert status_code is not None\n assert response_headers is not None\n\n stream = ASGIResponseStream(body_parts)\n\n return Response(status_code, headers=response_headers, stream=stream)\n", "path": "httpx/_transports/asgi.py"}], "after_files": [{"content": "import typing\n\nimport sniffio\n\nfrom .._models import Request, Response\nfrom .._types import AsyncByteStream\nfrom .base import AsyncBaseTransport\n\nif 
typing.TYPE_CHECKING: # pragma: no cover\n import asyncio\n\n import trio\n\n Event = typing.Union[asyncio.Event, trio.Event]\n\n\n_Message = typing.Dict[str, typing.Any]\n_Receive = typing.Callable[[], typing.Awaitable[_Message]]\n_Send = typing.Callable[\n [typing.Dict[str, typing.Any]], typing.Coroutine[None, None, None]\n]\n_ASGIApp = typing.Callable[\n [typing.Dict[str, typing.Any], _Receive, _Send], typing.Coroutine[None, None, None]\n]\n\n\ndef create_event() -> \"Event\":\n if sniffio.current_async_library() == \"trio\":\n import trio\n\n return trio.Event()\n else:\n import asyncio\n\n return asyncio.Event()\n\n\nclass ASGIResponseStream(AsyncByteStream):\n def __init__(self, body: typing.List[bytes]) -> None:\n self._body = body\n\n async def __aiter__(self) -> typing.AsyncIterator[bytes]:\n yield b\"\".join(self._body)\n\n\nclass ASGITransport(AsyncBaseTransport):\n \"\"\"\n A custom AsyncTransport that handles sending requests directly to an ASGI app.\n The simplest way to use this functionality is to use the `app` argument.\n\n ```\n client = httpx.AsyncClient(app=app)\n ```\n\n Alternatively, you can setup the transport instance explicitly.\n This allows you to include any additional configuration arguments specific\n to the ASGITransport class:\n\n ```\n transport = httpx.ASGITransport(\n app=app,\n root_path=\"/submount\",\n client=(\"1.2.3.4\", 123)\n )\n client = httpx.AsyncClient(transport=transport)\n ```\n\n Arguments:\n\n * `app` - The ASGI application.\n * `raise_app_exceptions` - Boolean indicating if exceptions in the application\n should be raised. Default to `True`. Can be set to `False` for use cases\n such as testing the content of a client 500 response.\n * `root_path` - The root path on which the ASGI application should be mounted.\n * `client` - A two-tuple indicating the client IP and port of incoming requests.\n ```\n \"\"\"\n\n def __init__(\n self,\n app: _ASGIApp,\n raise_app_exceptions: bool = True,\n root_path: str = \"\",\n client: typing.Tuple[str, int] = (\"127.0.0.1\", 123),\n ) -> None:\n self.app = app\n self.raise_app_exceptions = raise_app_exceptions\n self.root_path = root_path\n self.client = client\n\n async def handle_async_request(\n self,\n request: Request,\n ) -> Response:\n assert isinstance(request.stream, AsyncByteStream)\n\n # ASGI scope.\n scope = {\n \"type\": \"http\",\n \"asgi\": {\"version\": \"3.0\"},\n \"http_version\": \"1.1\",\n \"method\": request.method,\n \"headers\": [(k.lower(), v) for (k, v) in request.headers.raw],\n \"scheme\": request.url.scheme,\n \"path\": request.url.path,\n \"raw_path\": request.url.raw_path.split(b\"?\")[0],\n \"query_string\": request.url.query,\n \"server\": (request.url.host, request.url.port),\n \"client\": self.client,\n \"root_path\": self.root_path,\n }\n\n # Request.\n request_body_chunks = request.stream.__aiter__()\n request_complete = False\n\n # Response.\n status_code = None\n response_headers = None\n body_parts = []\n response_started = False\n response_complete = create_event()\n\n # ASGI callables.\n\n async def receive() -> typing.Dict[str, typing.Any]:\n nonlocal request_complete\n\n if request_complete:\n await response_complete.wait()\n return {\"type\": \"http.disconnect\"}\n\n try:\n body = await request_body_chunks.__anext__()\n except StopAsyncIteration:\n request_complete = True\n return {\"type\": \"http.request\", \"body\": b\"\", \"more_body\": False}\n return {\"type\": \"http.request\", \"body\": body, \"more_body\": True}\n\n async def send(message: 
typing.Dict[str, typing.Any]) -> None:\n nonlocal status_code, response_headers, response_started\n\n if message[\"type\"] == \"http.response.start\":\n assert not response_started\n\n status_code = message[\"status\"]\n response_headers = message.get(\"headers\", [])\n response_started = True\n\n elif message[\"type\"] == \"http.response.body\":\n assert not response_complete.is_set()\n body = message.get(\"body\", b\"\")\n more_body = message.get(\"more_body\", False)\n\n if body and request.method != \"HEAD\":\n body_parts.append(body)\n\n if not more_body:\n response_complete.set()\n\n try:\n await self.app(scope, receive, send)\n except Exception: # noqa: PIE-786\n if self.raise_app_exceptions:\n raise\n\n response_complete.set()\n if status_code is None:\n status_code = 500\n if response_headers is None:\n response_headers = {}\n\n assert response_complete.is_set()\n assert status_code is not None\n assert response_headers is not None\n\n stream = ASGIResponseStream(body_parts)\n\n return Response(status_code, headers=response_headers, stream=stream)\n", "path": "httpx/_transports/asgi.py"}]} | 2,388 | 151 |
gh_patches_debug_5674 | rasdani/github-patches | git_diff | mozilla__bugbug-1214 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Log number of spam/non-spam bugs in SpamBug get_labels
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bugbug/models/spambug.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import xgboost
7 from imblearn.under_sampling import RandomUnderSampler
8 from sklearn.compose import ColumnTransformer
9 from sklearn.feature_extraction import DictVectorizer
10 from sklearn.pipeline import Pipeline
11
12 from bugbug import bug_features, bugzilla, feature_cleanup
13 from bugbug.model import BugModel
14
15
16 class SpamBugModel(BugModel):
17 def __init__(self, lemmatization=False):
18 BugModel.__init__(self, lemmatization)
19
20 self.sampler = RandomUnderSampler(random_state=0)
21
22 feature_extractors = [
23 bug_features.has_str(),
24 bug_features.has_regression_range(),
25 bug_features.severity(),
26 bug_features.is_coverity_issue(),
27 bug_features.has_crash_signature(),
28 bug_features.has_url(),
29 bug_features.has_w3c_url(),
30 bug_features.has_github_url(),
31 bug_features.whiteboard(),
32 bug_features.patches(),
33 bug_features.landings(),
34 bug_features.product(),
35 bug_features.component(),
36 bug_features.num_words_title(),
37 bug_features.num_words_comments(),
38 bug_features.keywords(),
39 ]
40
41 cleanup_functions = [
42 feature_cleanup.fileref(),
43 feature_cleanup.url(),
44 feature_cleanup.synonyms(),
45 ]
46
47 self.extraction_pipeline = Pipeline(
48 [
49 (
50 "bug_extractor",
51 bug_features.BugExtractor(
52 feature_extractors, cleanup_functions, rollback=True
53 ),
54 ),
55 (
56 "union",
57 ColumnTransformer(
58 [
59 ("data", DictVectorizer(), "data"),
60 ("title", self.text_vectorizer(), "title"),
61 ("comments", self.text_vectorizer(), "comments"),
62 ]
63 ),
64 ),
65 ]
66 )
67
68 self.clf = xgboost.XGBClassifier(n_jobs=16)
69 self.clf.set_params(predictor="cpu_predictor")
70
71 def get_labels(self):
72 classes = {}
73
74 for bug_data in bugzilla.get_bugs(include_invalid=True):
75 bug_id = bug_data["id"]
76
77 # Legitimate bugs
78 if bug_data["resolution"] == "FIXED":
79 classes[bug_id] = 0
80
81 # Spam bugs
82 elif (
83 bug_data["product"] == "Invalid Bugs"
84 and bug_data["component"] == "General"
85 ):
86 classes[bug_id] = 1
87
88 return classes, [0, 1]
89
90 def items_gen(self, classes):
91 # Overwriting this method to add include_invalid=True to get_bugs to
92 # include spam bugs.
93 return (
94 (bug, classes[bug["id"]])
95 for bug in bugzilla.get_bugs(include_invalid=True)
96 if bug["id"] in classes
97 )
98
99 def get_feature_names(self):
100 return self.extraction_pipeline.named_steps["union"].get_feature_names()
101
102 def overwrite_classes(self, bugs, classes, probabilities):
103 for (i, bug) in enumerate(bugs):
104 if "@mozilla" in bug["creator"]:
105 if probabilities:
106 classes[i] = [1.0, 0.0]
107 else:
108 classes[i] = 0
109
110 return classes
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bugbug/models/spambug.py b/bugbug/models/spambug.py
--- a/bugbug/models/spambug.py
+++ b/bugbug/models/spambug.py
@@ -85,6 +85,17 @@
):
classes[bug_id] = 1
+ print(
+ "{} bugs are classified as non-spam".format(
+ sum(1 for label in classes.values() if label == 0)
+ )
+ )
+ print(
+ "{} bugs are classified as spam".format(
+ sum(1 for label in classes.values() if label == 1)
+ )
+ )
+
return classes, [0, 1]
def items_gen(self, classes):
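A quick aside on the patch above: the added `print` calls only tally the label values already gathered in `classes`. The snippet below is a minimal standalone sketch of that counting pattern with made-up bug IDs, not real bugbug data.
```python
# Hypothetical get_labels()-style output: 0 = non-spam (FIXED), 1 = spam.
classes = {1001: 0, 1002: 1, 1003: 0, 1004: 1, 1005: 1}

non_spam = sum(1 for label in classes.values() if label == 0)
spam = sum(1 for label in classes.values() if label == 1)

print("{} bugs are classified as non-spam".format(non_spam))  # -> 2 bugs ...
print("{} bugs are classified as spam".format(spam))          # -> 3 bugs ...
```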
| {"golden_diff": "diff --git a/bugbug/models/spambug.py b/bugbug/models/spambug.py\n--- a/bugbug/models/spambug.py\n+++ b/bugbug/models/spambug.py\n@@ -85,6 +85,17 @@\n ):\n classes[bug_id] = 1\n \n+ print(\n+ \"{} bugs are classified as non-spam\".format(\n+ sum(1 for label in classes.values() if label == 0)\n+ )\n+ )\n+ print(\n+ \"{} bugs are classified as spam\".format(\n+ sum(1 for label in classes.values() if label == 1)\n+ )\n+ )\n+\n return classes, [0, 1]\n \n def items_gen(self, classes):\n", "issue": "Log number of spam/non-spam bugs in SpamBug get_labels\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup\nfrom bugbug.model import BugModel\n\n\nclass SpamBugModel(BugModel):\n def __init__(self, lemmatization=False):\n BugModel.__init__(self, lemmatization)\n\n self.sampler = RandomUnderSampler(random_state=0)\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.has_w3c_url(),\n bug_features.has_github_url(),\n bug_features.whiteboard(),\n bug_features.patches(),\n bug_features.landings(),\n bug_features.product(),\n bug_features.component(),\n bug_features.num_words_title(),\n bug_features.num_words_comments(),\n bug_features.keywords(),\n ]\n\n cleanup_functions = [\n feature_cleanup.fileref(),\n feature_cleanup.url(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"bug_extractor\",\n bug_features.BugExtractor(\n feature_extractors, cleanup_functions, rollback=True\n ),\n ),\n (\n \"union\",\n ColumnTransformer(\n [\n (\"data\", DictVectorizer(), \"data\"),\n (\"title\", self.text_vectorizer(), \"title\"),\n (\"comments\", self.text_vectorizer(), \"comments\"),\n ]\n ),\n ),\n ]\n )\n\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n self.clf.set_params(predictor=\"cpu_predictor\")\n\n def get_labels(self):\n classes = {}\n\n for bug_data in bugzilla.get_bugs(include_invalid=True):\n bug_id = bug_data[\"id\"]\n\n # Legitimate bugs\n if bug_data[\"resolution\"] == \"FIXED\":\n classes[bug_id] = 0\n\n # Spam bugs\n elif (\n bug_data[\"product\"] == \"Invalid Bugs\"\n and bug_data[\"component\"] == \"General\"\n ):\n classes[bug_id] = 1\n\n return classes, [0, 1]\n\n def items_gen(self, classes):\n # Overwriting this method to add include_invalid=True to get_bugs to\n # include spam bugs.\n return (\n (bug, classes[bug[\"id\"]])\n for bug in bugzilla.get_bugs(include_invalid=True)\n if bug[\"id\"] in classes\n )\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps[\"union\"].get_feature_names()\n\n def overwrite_classes(self, bugs, classes, probabilities):\n for (i, bug) in enumerate(bugs):\n if \"@mozilla\" in bug[\"creator\"]:\n if probabilities:\n classes[i] = [1.0, 0.0]\n else:\n classes[i] = 0\n\n return classes\n", "path": "bugbug/models/spambug.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of 
the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup\nfrom bugbug.model import BugModel\n\n\nclass SpamBugModel(BugModel):\n def __init__(self, lemmatization=False):\n BugModel.__init__(self, lemmatization)\n\n self.sampler = RandomUnderSampler(random_state=0)\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.has_w3c_url(),\n bug_features.has_github_url(),\n bug_features.whiteboard(),\n bug_features.patches(),\n bug_features.landings(),\n bug_features.product(),\n bug_features.component(),\n bug_features.num_words_title(),\n bug_features.num_words_comments(),\n bug_features.keywords(),\n ]\n\n cleanup_functions = [\n feature_cleanup.fileref(),\n feature_cleanup.url(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"bug_extractor\",\n bug_features.BugExtractor(\n feature_extractors, cleanup_functions, rollback=True\n ),\n ),\n (\n \"union\",\n ColumnTransformer(\n [\n (\"data\", DictVectorizer(), \"data\"),\n (\"title\", self.text_vectorizer(), \"title\"),\n (\"comments\", self.text_vectorizer(), \"comments\"),\n ]\n ),\n ),\n ]\n )\n\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n self.clf.set_params(predictor=\"cpu_predictor\")\n\n def get_labels(self):\n classes = {}\n\n for bug_data in bugzilla.get_bugs(include_invalid=True):\n bug_id = bug_data[\"id\"]\n\n # Legitimate bugs\n if bug_data[\"resolution\"] == \"FIXED\":\n classes[bug_id] = 0\n\n # Spam bugs\n elif (\n bug_data[\"product\"] == \"Invalid Bugs\"\n and bug_data[\"component\"] == \"General\"\n ):\n classes[bug_id] = 1\n\n print(\n \"{} bugs are classified as non-spam\".format(\n sum(1 for label in classes.values() if label == 0)\n )\n )\n print(\n \"{} bugs are classified as spam\".format(\n sum(1 for label in classes.values() if label == 1)\n )\n )\n\n return classes, [0, 1]\n\n def items_gen(self, classes):\n # Overwriting this method to add include_invalid=True to get_bugs to\n # include spam bugs.\n return (\n (bug, classes[bug[\"id\"]])\n for bug in bugzilla.get_bugs(include_invalid=True)\n if bug[\"id\"] in classes\n )\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps[\"union\"].get_feature_names()\n\n def overwrite_classes(self, bugs, classes, probabilities):\n for (i, bug) in enumerate(bugs):\n if \"@mozilla\" in bug[\"creator\"]:\n if probabilities:\n classes[i] = [1.0, 0.0]\n else:\n classes[i] = 0\n\n return classes\n", "path": "bugbug/models/spambug.py"}]} | 1,218 | 167 |
gh_patches_debug_32358 | rasdani/github-patches | git_diff | deepset-ai__haystack-5809 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change `MemoryEmbeddingRetriever` to non-batch mode
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `haystack/preview/components/retrievers/memory.py`
Content:
```
1 from typing import Dict, List, Any, Optional
2
3 from haystack.preview import component, Document, default_to_dict, default_from_dict, DeserializationError
4 from haystack.preview.document_stores import MemoryDocumentStore, document_store
5
6
7 @component
8 class MemoryBM25Retriever:
9 """
10 A component for retrieving documents from a MemoryDocumentStore using the BM25 algorithm.
11
12 Needs to be connected to a MemoryDocumentStore to run.
13 """
14
15 def __init__(
16 self,
17 document_store: MemoryDocumentStore,
18 filters: Optional[Dict[str, Any]] = None,
19 top_k: int = 10,
20 scale_score: bool = True,
21 ):
22 """
23 Create a MemoryBM25Retriever component.
24
25 :param document_store: An instance of MemoryDocumentStore.
26 :param filters: A dictionary with filters to narrow down the search space. Default is None.
27 :param top_k: The maximum number of documents to retrieve. Default is 10.
28 :param scale_score: Whether to scale the BM25 score or not. Default is True.
29
30 :raises ValueError: If the specified top_k is not > 0.
31 """
32 if not isinstance(document_store, MemoryDocumentStore):
33 raise ValueError("document_store must be an instance of MemoryDocumentStore")
34
35 self.document_store = document_store
36
37 if top_k <= 0:
38 raise ValueError(f"top_k must be > 0, but got {top_k}")
39
40 self.filters = filters
41 self.top_k = top_k
42 self.scale_score = scale_score
43
44 def to_dict(self) -> Dict[str, Any]:
45 """
46 Serialize this component to a dictionary.
47 """
48 docstore = self.document_store.to_dict()
49 return default_to_dict(
50 self, document_store=docstore, filters=self.filters, top_k=self.top_k, scale_score=self.scale_score
51 )
52
53 @classmethod
54 def from_dict(cls, data: Dict[str, Any]) -> "MemoryBM25Retriever":
55 """
56 Deserialize this component from a dictionary.
57 """
58 init_params = data.get("init_parameters", {})
59 if "document_store" not in init_params:
60 raise DeserializationError("Missing 'document_store' in serialization data")
61 if "type" not in init_params["document_store"]:
62 raise DeserializationError("Missing 'type' in document store's serialization data")
63 if init_params["document_store"]["type"] not in document_store.registry:
64 raise DeserializationError(f"DocumentStore type '{init_params['document_store']['type']}' not found")
65
66 docstore_class = document_store.registry[init_params["document_store"]["type"]]
67 docstore = docstore_class.from_dict(init_params["document_store"])
68 data["init_parameters"]["document_store"] = docstore
69 return default_from_dict(cls, data)
70
71 @component.output_types(documents=List[Document])
72 def run(
73 self,
74 query: str,
75 filters: Optional[Dict[str, Any]] = None,
76 top_k: Optional[int] = None,
77 scale_score: Optional[bool] = None,
78 ):
79 """
80 Run the MemoryBM25Retriever on the given input data.
81
82 :param query: The query string for the retriever.
83 :param filters: A dictionary with filters to narrow down the search space.
84 :param top_k: The maximum number of documents to return.
85 :param scale_score: Whether to scale the BM25 scores or not.
86 :return: The retrieved documents.
87
88 :raises ValueError: If the specified DocumentStore is not found or is not a MemoryDocumentStore instance.
89 """
90 if filters is None:
91 filters = self.filters
92 if top_k is None:
93 top_k = self.top_k
94 if scale_score is None:
95 scale_score = self.scale_score
96
97 docs = self.document_store.bm25_retrieval(query=query, filters=filters, top_k=top_k, scale_score=scale_score)
98 return {"documents": docs}
99
100
101 @component
102 class MemoryEmbeddingRetriever:
103 """
104 A component for retrieving documents from a MemoryDocumentStore using a vector similarity metric.
105
106 Needs to be connected to a MemoryDocumentStore to run.
107 """
108
109 def __init__(
110 self,
111 document_store: MemoryDocumentStore,
112 filters: Optional[Dict[str, Any]] = None,
113 top_k: int = 10,
114 scale_score: bool = True,
115 return_embedding: bool = False,
116 ):
117 """
118 Create a MemoryEmbeddingRetriever component.
119
120 :param document_store: An instance of MemoryDocumentStore.
121 :param filters: A dictionary with filters to narrow down the search space. Default is None.
122 :param top_k: The maximum number of documents to retrieve. Default is 10.
123 :param scale_score: Whether to scale the scores of the retrieved documents or not. Default is True.
124 :param return_embedding: Whether to return the embedding of the retrieved Documents. Default is False.
125
126 :raises ValueError: If the specified top_k is not > 0.
127 """
128 if not isinstance(document_store, MemoryDocumentStore):
129 raise ValueError("document_store must be an instance of MemoryDocumentStore")
130
131 self.document_store = document_store
132
133 if top_k <= 0:
134 raise ValueError(f"top_k must be > 0, but got {top_k}")
135
136 self.filters = filters
137 self.top_k = top_k
138 self.scale_score = scale_score
139 self.return_embedding = return_embedding
140
141 def to_dict(self) -> Dict[str, Any]:
142 """
143 Serialize this component to a dictionary.
144 """
145 docstore = self.document_store.to_dict()
146 return default_to_dict(
147 self,
148 document_store=docstore,
149 filters=self.filters,
150 top_k=self.top_k,
151 scale_score=self.scale_score,
152 return_embedding=self.return_embedding,
153 )
154
155 @classmethod
156 def from_dict(cls, data: Dict[str, Any]) -> "MemoryBM25Retriever":
157 """
158 Deserialize this component from a dictionary.
159 """
160 init_params = data.get("init_parameters", {})
161 if "document_store" not in init_params:
162 raise DeserializationError("Missing 'document_store' in serialization data")
163 if "type" not in init_params["document_store"]:
164 raise DeserializationError("Missing 'type' in document store's serialization data")
165 if init_params["document_store"]["type"] not in document_store.registry:
166 raise DeserializationError(f"DocumentStore type '{init_params['document_store']['type']}' not found")
167
168 docstore_class = document_store.registry[init_params["document_store"]["type"]]
169 docstore = docstore_class.from_dict(init_params["document_store"])
170 data["init_parameters"]["document_store"] = docstore
171 return default_from_dict(cls, data)
172
173 @component.output_types(documents=List[List[Document]])
174 def run(
175 self,
176 queries_embeddings: List[List[float]],
177 filters: Optional[Dict[str, Any]] = None,
178 top_k: Optional[int] = None,
179 scale_score: Optional[bool] = None,
180 return_embedding: Optional[bool] = None,
181 ):
182 """
183 Run the MemoryEmbeddingRetriever on the given input data.
184
185 :param queries_embeddings: Embeddings of the queries.
186 :param filters: A dictionary with filters to narrow down the search space.
187 :param top_k: The maximum number of documents to return.
188 :param scale_score: Whether to scale the scores of the retrieved documents or not.
189 :param return_embedding: Whether to return the embedding of the retrieved Documents.
190 :return: The retrieved documents.
191
192 :raises ValueError: If the specified DocumentStore is not found or is not a MemoryDocumentStore instance.
193 """
194 if filters is None:
195 filters = self.filters
196 if top_k is None:
197 top_k = self.top_k
198 if scale_score is None:
199 scale_score = self.scale_score
200 if return_embedding is None:
201 return_embedding = self.return_embedding
202
203 docs = []
204 for query_embedding in queries_embeddings:
205 docs.append(
206 self.document_store.embedding_retrieval(
207 query_embedding=query_embedding,
208 filters=filters,
209 top_k=top_k,
210 scale_score=scale_score,
211 return_embedding=return_embedding,
212 )
213 )
214 return {"documents": docs}
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/haystack/preview/components/retrievers/memory.py b/haystack/preview/components/retrievers/memory.py
--- a/haystack/preview/components/retrievers/memory.py
+++ b/haystack/preview/components/retrievers/memory.py
@@ -173,7 +173,7 @@
@component.output_types(documents=List[List[Document]])
def run(
self,
- queries_embeddings: List[List[float]],
+ query_embedding: List[float],
filters: Optional[Dict[str, Any]] = None,
top_k: Optional[int] = None,
scale_score: Optional[bool] = None,
@@ -182,7 +182,7 @@
"""
Run the MemoryEmbeddingRetriever on the given input data.
- :param queries_embeddings: Embeddings of the queries.
+ :param query_embedding: Embedding of the query.
:param filters: A dictionary with filters to narrow down the search space.
:param top_k: The maximum number of documents to return.
:param scale_score: Whether to scale the scores of the retrieved documents or not.
@@ -200,15 +200,12 @@
if return_embedding is None:
return_embedding = self.return_embedding
- docs = []
- for query_embedding in queries_embeddings:
- docs.append(
- self.document_store.embedding_retrieval(
- query_embedding=query_embedding,
- filters=filters,
- top_k=top_k,
- scale_score=scale_score,
- return_embedding=return_embedding,
- )
- )
+ docs = self.document_store.embedding_retrieval(
+ query_embedding=query_embedding,
+ filters=filters,
+ top_k=top_k,
+ scale_score=scale_score,
+ return_embedding=return_embedding,
+ )
+
return {"documents": docs}
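As context for the patch above: `run()` now takes a single `query_embedding` instead of a `queries_embeddings` batch, so any batching moves to the call site. The sketch below uses a stand-in class rather than the real haystack component, purely to show the changed call shape.
```python
# Stand-in retriever; only the run() signature mirrors the patched component.
class FakeEmbeddingRetriever:
    def run(self, query_embedding):
        # Pretend retrieval: return one "document" per query embedding.
        return {"documents": ["doc closest to {}".format(query_embedding)]}

retriever = FakeEmbeddingRetriever()
query_embeddings = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]

# Batch handling now happens at the call site instead of inside run().
results = [retriever.run(q)["documents"] for q in query_embeddings]
print(results)
```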
| {"golden_diff": "diff --git a/haystack/preview/components/retrievers/memory.py b/haystack/preview/components/retrievers/memory.py\n--- a/haystack/preview/components/retrievers/memory.py\n+++ b/haystack/preview/components/retrievers/memory.py\n@@ -173,7 +173,7 @@\n @component.output_types(documents=List[List[Document]])\n def run(\n self,\n- queries_embeddings: List[List[float]],\n+ query_embedding: List[float],\n filters: Optional[Dict[str, Any]] = None,\n top_k: Optional[int] = None,\n scale_score: Optional[bool] = None,\n@@ -182,7 +182,7 @@\n \"\"\"\n Run the MemoryEmbeddingRetriever on the given input data.\n \n- :param queries_embeddings: Embeddings of the queries.\n+ :param query_embedding: Embedding of the query.\n :param filters: A dictionary with filters to narrow down the search space.\n :param top_k: The maximum number of documents to return.\n :param scale_score: Whether to scale the scores of the retrieved documents or not.\n@@ -200,15 +200,12 @@\n if return_embedding is None:\n return_embedding = self.return_embedding\n \n- docs = []\n- for query_embedding in queries_embeddings:\n- docs.append(\n- self.document_store.embedding_retrieval(\n- query_embedding=query_embedding,\n- filters=filters,\n- top_k=top_k,\n- scale_score=scale_score,\n- return_embedding=return_embedding,\n- )\n- )\n+ docs = self.document_store.embedding_retrieval(\n+ query_embedding=query_embedding,\n+ filters=filters,\n+ top_k=top_k,\n+ scale_score=scale_score,\n+ return_embedding=return_embedding,\n+ )\n+\n return {\"documents\": docs}\n", "issue": "Change `MemoryEmbeddingRetriever` to non-batch mode\n\n", "before_files": [{"content": "from typing import Dict, List, Any, Optional\n\nfrom haystack.preview import component, Document, default_to_dict, default_from_dict, DeserializationError\nfrom haystack.preview.document_stores import MemoryDocumentStore, document_store\n\n\n@component\nclass MemoryBM25Retriever:\n \"\"\"\n A component for retrieving documents from a MemoryDocumentStore using the BM25 algorithm.\n\n Needs to be connected to a MemoryDocumentStore to run.\n \"\"\"\n\n def __init__(\n self,\n document_store: MemoryDocumentStore,\n filters: Optional[Dict[str, Any]] = None,\n top_k: int = 10,\n scale_score: bool = True,\n ):\n \"\"\"\n Create a MemoryBM25Retriever component.\n\n :param document_store: An instance of MemoryDocumentStore.\n :param filters: A dictionary with filters to narrow down the search space. Default is None.\n :param top_k: The maximum number of documents to retrieve. Default is 10.\n :param scale_score: Whether to scale the BM25 score or not. 
Default is True.\n\n :raises ValueError: If the specified top_k is not > 0.\n \"\"\"\n if not isinstance(document_store, MemoryDocumentStore):\n raise ValueError(\"document_store must be an instance of MemoryDocumentStore\")\n\n self.document_store = document_store\n\n if top_k <= 0:\n raise ValueError(f\"top_k must be > 0, but got {top_k}\")\n\n self.filters = filters\n self.top_k = top_k\n self.scale_score = scale_score\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n \"\"\"\n docstore = self.document_store.to_dict()\n return default_to_dict(\n self, document_store=docstore, filters=self.filters, top_k=self.top_k, scale_score=self.scale_score\n )\n\n @classmethod\n def from_dict(cls, data: Dict[str, Any]) -> \"MemoryBM25Retriever\":\n \"\"\"\n Deserialize this component from a dictionary.\n \"\"\"\n init_params = data.get(\"init_parameters\", {})\n if \"document_store\" not in init_params:\n raise DeserializationError(\"Missing 'document_store' in serialization data\")\n if \"type\" not in init_params[\"document_store\"]:\n raise DeserializationError(\"Missing 'type' in document store's serialization data\")\n if init_params[\"document_store\"][\"type\"] not in document_store.registry:\n raise DeserializationError(f\"DocumentStore type '{init_params['document_store']['type']}' not found\")\n\n docstore_class = document_store.registry[init_params[\"document_store\"][\"type\"]]\n docstore = docstore_class.from_dict(init_params[\"document_store\"])\n data[\"init_parameters\"][\"document_store\"] = docstore\n return default_from_dict(cls, data)\n\n @component.output_types(documents=List[Document])\n def run(\n self,\n query: str,\n filters: Optional[Dict[str, Any]] = None,\n top_k: Optional[int] = None,\n scale_score: Optional[bool] = None,\n ):\n \"\"\"\n Run the MemoryBM25Retriever on the given input data.\n\n :param query: The query string for the retriever.\n :param filters: A dictionary with filters to narrow down the search space.\n :param top_k: The maximum number of documents to return.\n :param scale_score: Whether to scale the BM25 scores or not.\n :return: The retrieved documents.\n\n :raises ValueError: If the specified DocumentStore is not found or is not a MemoryDocumentStore instance.\n \"\"\"\n if filters is None:\n filters = self.filters\n if top_k is None:\n top_k = self.top_k\n if scale_score is None:\n scale_score = self.scale_score\n\n docs = self.document_store.bm25_retrieval(query=query, filters=filters, top_k=top_k, scale_score=scale_score)\n return {\"documents\": docs}\n\n\n@component\nclass MemoryEmbeddingRetriever:\n \"\"\"\n A component for retrieving documents from a MemoryDocumentStore using a vector similarity metric.\n\n Needs to be connected to a MemoryDocumentStore to run.\n \"\"\"\n\n def __init__(\n self,\n document_store: MemoryDocumentStore,\n filters: Optional[Dict[str, Any]] = None,\n top_k: int = 10,\n scale_score: bool = True,\n return_embedding: bool = False,\n ):\n \"\"\"\n Create a MemoryEmbeddingRetriever component.\n\n :param document_store: An instance of MemoryDocumentStore.\n :param filters: A dictionary with filters to narrow down the search space. Default is None.\n :param top_k: The maximum number of documents to retrieve. Default is 10.\n :param scale_score: Whether to scale the scores of the retrieved documents or not. Default is True.\n :param return_embedding: Whether to return the embedding of the retrieved Documents. 
Default is False.\n\n :raises ValueError: If the specified top_k is not > 0.\n \"\"\"\n if not isinstance(document_store, MemoryDocumentStore):\n raise ValueError(\"document_store must be an instance of MemoryDocumentStore\")\n\n self.document_store = document_store\n\n if top_k <= 0:\n raise ValueError(f\"top_k must be > 0, but got {top_k}\")\n\n self.filters = filters\n self.top_k = top_k\n self.scale_score = scale_score\n self.return_embedding = return_embedding\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n \"\"\"\n docstore = self.document_store.to_dict()\n return default_to_dict(\n self,\n document_store=docstore,\n filters=self.filters,\n top_k=self.top_k,\n scale_score=self.scale_score,\n return_embedding=self.return_embedding,\n )\n\n @classmethod\n def from_dict(cls, data: Dict[str, Any]) -> \"MemoryBM25Retriever\":\n \"\"\"\n Deserialize this component from a dictionary.\n \"\"\"\n init_params = data.get(\"init_parameters\", {})\n if \"document_store\" not in init_params:\n raise DeserializationError(\"Missing 'document_store' in serialization data\")\n if \"type\" not in init_params[\"document_store\"]:\n raise DeserializationError(\"Missing 'type' in document store's serialization data\")\n if init_params[\"document_store\"][\"type\"] not in document_store.registry:\n raise DeserializationError(f\"DocumentStore type '{init_params['document_store']['type']}' not found\")\n\n docstore_class = document_store.registry[init_params[\"document_store\"][\"type\"]]\n docstore = docstore_class.from_dict(init_params[\"document_store\"])\n data[\"init_parameters\"][\"document_store\"] = docstore\n return default_from_dict(cls, data)\n\n @component.output_types(documents=List[List[Document]])\n def run(\n self,\n queries_embeddings: List[List[float]],\n filters: Optional[Dict[str, Any]] = None,\n top_k: Optional[int] = None,\n scale_score: Optional[bool] = None,\n return_embedding: Optional[bool] = None,\n ):\n \"\"\"\n Run the MemoryEmbeddingRetriever on the given input data.\n\n :param queries_embeddings: Embeddings of the queries.\n :param filters: A dictionary with filters to narrow down the search space.\n :param top_k: The maximum number of documents to return.\n :param scale_score: Whether to scale the scores of the retrieved documents or not.\n :param return_embedding: Whether to return the embedding of the retrieved Documents.\n :return: The retrieved documents.\n\n :raises ValueError: If the specified DocumentStore is not found or is not a MemoryDocumentStore instance.\n \"\"\"\n if filters is None:\n filters = self.filters\n if top_k is None:\n top_k = self.top_k\n if scale_score is None:\n scale_score = self.scale_score\n if return_embedding is None:\n return_embedding = self.return_embedding\n\n docs = []\n for query_embedding in queries_embeddings:\n docs.append(\n self.document_store.embedding_retrieval(\n query_embedding=query_embedding,\n filters=filters,\n top_k=top_k,\n scale_score=scale_score,\n return_embedding=return_embedding,\n )\n )\n return {\"documents\": docs}\n", "path": "haystack/preview/components/retrievers/memory.py"}], "after_files": [{"content": "from typing import Dict, List, Any, Optional\n\nfrom haystack.preview import component, Document, default_to_dict, default_from_dict, DeserializationError\nfrom haystack.preview.document_stores import MemoryDocumentStore, document_store\n\n\n@component\nclass MemoryBM25Retriever:\n \"\"\"\n A component for retrieving documents from a MemoryDocumentStore using the 
BM25 algorithm.\n\n Needs to be connected to a MemoryDocumentStore to run.\n \"\"\"\n\n def __init__(\n self,\n document_store: MemoryDocumentStore,\n filters: Optional[Dict[str, Any]] = None,\n top_k: int = 10,\n scale_score: bool = True,\n ):\n \"\"\"\n Create a MemoryBM25Retriever component.\n\n :param document_store: An instance of MemoryDocumentStore.\n :param filters: A dictionary with filters to narrow down the search space. Default is None.\n :param top_k: The maximum number of documents to retrieve. Default is 10.\n :param scale_score: Whether to scale the BM25 score or not. Default is True.\n\n :raises ValueError: If the specified top_k is not > 0.\n \"\"\"\n if not isinstance(document_store, MemoryDocumentStore):\n raise ValueError(\"document_store must be an instance of MemoryDocumentStore\")\n\n self.document_store = document_store\n\n if top_k <= 0:\n raise ValueError(f\"top_k must be > 0, but got {top_k}\")\n\n self.filters = filters\n self.top_k = top_k\n self.scale_score = scale_score\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n \"\"\"\n docstore = self.document_store.to_dict()\n return default_to_dict(\n self, document_store=docstore, filters=self.filters, top_k=self.top_k, scale_score=self.scale_score\n )\n\n @classmethod\n def from_dict(cls, data: Dict[str, Any]) -> \"MemoryBM25Retriever\":\n \"\"\"\n Deserialize this component from a dictionary.\n \"\"\"\n init_params = data.get(\"init_parameters\", {})\n if \"document_store\" not in init_params:\n raise DeserializationError(\"Missing 'document_store' in serialization data\")\n if \"type\" not in init_params[\"document_store\"]:\n raise DeserializationError(\"Missing 'type' in document store's serialization data\")\n if init_params[\"document_store\"][\"type\"] not in document_store.registry:\n raise DeserializationError(f\"DocumentStore type '{init_params['document_store']['type']}' not found\")\n\n docstore_class = document_store.registry[init_params[\"document_store\"][\"type\"]]\n docstore = docstore_class.from_dict(init_params[\"document_store\"])\n data[\"init_parameters\"][\"document_store\"] = docstore\n return default_from_dict(cls, data)\n\n @component.output_types(documents=List[Document])\n def run(\n self,\n query: str,\n filters: Optional[Dict[str, Any]] = None,\n top_k: Optional[int] = None,\n scale_score: Optional[bool] = None,\n ):\n \"\"\"\n Run the MemoryBM25Retriever on the given input data.\n\n :param query: The query string for the retriever.\n :param filters: A dictionary with filters to narrow down the search space.\n :param top_k: The maximum number of documents to return.\n :param scale_score: Whether to scale the BM25 scores or not.\n :return: The retrieved documents.\n\n :raises ValueError: If the specified DocumentStore is not found or is not a MemoryDocumentStore instance.\n \"\"\"\n if filters is None:\n filters = self.filters\n if top_k is None:\n top_k = self.top_k\n if scale_score is None:\n scale_score = self.scale_score\n\n docs = self.document_store.bm25_retrieval(query=query, filters=filters, top_k=top_k, scale_score=scale_score)\n return {\"documents\": docs}\n\n\n@component\nclass MemoryEmbeddingRetriever:\n \"\"\"\n A component for retrieving documents from a MemoryDocumentStore using a vector similarity metric.\n\n Needs to be connected to a MemoryDocumentStore to run.\n \"\"\"\n\n def __init__(\n self,\n document_store: MemoryDocumentStore,\n filters: Optional[Dict[str, Any]] = None,\n top_k: int = 10,\n scale_score: bool = 
True,\n return_embedding: bool = False,\n ):\n \"\"\"\n Create a MemoryEmbeddingRetriever component.\n\n :param document_store: An instance of MemoryDocumentStore.\n :param filters: A dictionary with filters to narrow down the search space. Default is None.\n :param top_k: The maximum number of documents to retrieve. Default is 10.\n :param scale_score: Whether to scale the scores of the retrieved documents or not. Default is True.\n :param return_embedding: Whether to return the embedding of the retrieved Documents. Default is False.\n\n :raises ValueError: If the specified top_k is not > 0.\n \"\"\"\n if not isinstance(document_store, MemoryDocumentStore):\n raise ValueError(\"document_store must be an instance of MemoryDocumentStore\")\n\n self.document_store = document_store\n\n if top_k <= 0:\n raise ValueError(f\"top_k must be > 0, but got {top_k}\")\n\n self.filters = filters\n self.top_k = top_k\n self.scale_score = scale_score\n self.return_embedding = return_embedding\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n \"\"\"\n docstore = self.document_store.to_dict()\n return default_to_dict(\n self,\n document_store=docstore,\n filters=self.filters,\n top_k=self.top_k,\n scale_score=self.scale_score,\n return_embedding=self.return_embedding,\n )\n\n @classmethod\n def from_dict(cls, data: Dict[str, Any]) -> \"MemoryBM25Retriever\":\n \"\"\"\n Deserialize this component from a dictionary.\n \"\"\"\n init_params = data.get(\"init_parameters\", {})\n if \"document_store\" not in init_params:\n raise DeserializationError(\"Missing 'document_store' in serialization data\")\n if \"type\" not in init_params[\"document_store\"]:\n raise DeserializationError(\"Missing 'type' in document store's serialization data\")\n if init_params[\"document_store\"][\"type\"] not in document_store.registry:\n raise DeserializationError(f\"DocumentStore type '{init_params['document_store']['type']}' not found\")\n\n docstore_class = document_store.registry[init_params[\"document_store\"][\"type\"]]\n docstore = docstore_class.from_dict(init_params[\"document_store\"])\n data[\"init_parameters\"][\"document_store\"] = docstore\n return default_from_dict(cls, data)\n\n @component.output_types(documents=List[List[Document]])\n def run(\n self,\n query_embedding: List[float],\n filters: Optional[Dict[str, Any]] = None,\n top_k: Optional[int] = None,\n scale_score: Optional[bool] = None,\n return_embedding: Optional[bool] = None,\n ):\n \"\"\"\n Run the MemoryEmbeddingRetriever on the given input data.\n\n :param query_embedding: Embedding of the query.\n :param filters: A dictionary with filters to narrow down the search space.\n :param top_k: The maximum number of documents to return.\n :param scale_score: Whether to scale the scores of the retrieved documents or not.\n :param return_embedding: Whether to return the embedding of the retrieved Documents.\n :return: The retrieved documents.\n\n :raises ValueError: If the specified DocumentStore is not found or is not a MemoryDocumentStore instance.\n \"\"\"\n if filters is None:\n filters = self.filters\n if top_k is None:\n top_k = self.top_k\n if scale_score is None:\n scale_score = self.scale_score\n if return_embedding is None:\n return_embedding = self.return_embedding\n\n docs = self.document_store.embedding_retrieval(\n query_embedding=query_embedding,\n filters=filters,\n top_k=top_k,\n scale_score=scale_score,\n return_embedding=return_embedding,\n )\n\n return {\"documents\": docs}\n", "path": 
"haystack/preview/components/retrievers/memory.py"}]} | 2,613 | 408 |
gh_patches_debug_16905 | rasdani/github-patches | git_diff | deeppavlov__DeepPavlov-101 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Downloading requirements
I was trying to install deeppavlov and had a problem following the installation steps.
1) There is no download.py file in root folder, it is in `deeppavlov/download.py`
``` sh
python download.py [-all]
```
2) Even if I use that file it outputs the error:
``` sh
(env) root@mysexyhost:~/work/ipavlov/DeepPavlov# python3 deeppavlov/download.py
/home/ubuntu/work/ipavlov/env/local/lib/python3.5/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
Using TensorFlow backend.
2018-03-12 07:34:11.490 ERROR in 'deeppavlov.core.models.serializable'['log'] at line 54: LOGGER ERROR: Can not initialise deeppavlov.core.models.serializable logger, logging to the stderr. Error traceback:
Traceback (most recent call last):
File "/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/core/common/log.py", line 32, in get_logger
with open(log_config_path) as log_config_json:
TypeError: invalid file: PosixPath('/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/log_config.json')
2018-03-12 07:34:11.491 ERROR in 'deeppavlov.core.models.keras_model'['log'] at line 54: LOGGER ERROR: Can not initialise deeppavlov.core.models.keras_model logger, logging to the stderr. Error traceback:
Traceback (most recent call last):
File "/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/core/common/log.py", line 32, in get_logger
with open(log_config_path) as log_config_json:
TypeError: invalid file: PosixPath('/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/log_config.json')
Traceback (most recent call last):
File "deeppavlov/download.py", line 24, in <module>
from deeppavlov.core.data.utils import download, download_decompress
File "/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/__init__.py", line 1, in <module>
import deeppavlov.core.models.keras_model
File "/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/core/models/keras_model.py", line 39, in <module>
class KerasModel(NNModel, metaclass=TfModelMeta):
File "/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/core/models/keras_model.py", line 143, in KerasModel
sample_weight_mode=None, weighted_metrics=None, target_tensors=None):
File "/home/ubuntu/work/ipavlov/env/local/lib/python3.5/site-packages/overrides/overrides.py", line 70, in overrides
method.__name__)
AssertionError: No super class method found for "load"
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `telegram_utils/telegram_ui.py`
Content:
```
1 """
2 Copyright 2017 Neural Networks and Deep Learning lab, MIPT
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8 http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15 """
16 import telebot
17
18 from deeppavlov.core.common.file import read_json
19 from deeppavlov.core.commands.infer import build_model_from_config
20
21
22 def init_bot_for_model(token, model):
23 bot = telebot.TeleBot(token)
24
25 model_name = type(model).__name__
26 models_info = read_json('../telegram_utils/models_info.json')
27 model_info = models_info[model_name] if model_name in models_info else models_info['@default']
28
29 @bot.message_handler(commands=['start'])
30 def send_start_message(message):
31 chat_id = message.chat.id
32 out_message = model_info['start_message']
33 if hasattr(model, 'reset'):
34 model.reset()
35 bot.send_message(chat_id, out_message)
36
37 @bot.message_handler(commands=['help'])
38 def send_help_message(message):
39 chat_id = message.chat.id
40 out_message = model_info['help_message']
41 bot.send_message(chat_id, out_message)
42
43 @bot.message_handler()
44 def handle_inference(message):
45 chat_id = message.chat.id
46 context = message.text
47
48 pred = model([context])
49 reply_message = str(pred[0])
50 bot.send_message(chat_id, reply_message)
51
52 bot.polling()
53
54
55 def interact_model_by_telegram(config_path, token):
56 config = read_json(config_path)
57 model = build_model_from_config(config)
58 init_bot_for_model(token, model)
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/telegram_utils/telegram_ui.py b/telegram_utils/telegram_ui.py
--- a/telegram_utils/telegram_ui.py
+++ b/telegram_utils/telegram_ui.py
@@ -13,6 +13,8 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
+from pathlib import Path
+
import telebot
from deeppavlov.core.common.file import read_json
@@ -23,7 +25,8 @@
bot = telebot.TeleBot(token)
model_name = type(model).__name__
- models_info = read_json('../telegram_utils/models_info.json')
+ config_path = Path(__file__).parent / 'models_info.json'
+ models_info = read_json(str(config_path))
model_info = models_info[model_name] if model_name in models_info else models_info['@default']
@bot.message_handler(commands=['start'])
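For context on the patch above: the bug was a path relative to the process working directory (`'../telegram_utils/models_info.json'`), and the fix anchors the lookup to the module itself. Below is a generic sketch of that `pathlib` pattern, independent of DeepPavlov; the file name is just the example from the patch.
```python
from pathlib import Path

# Resolve a data file sitting next to this module, regardless of the
# directory the interpreter was started from.
config_path = Path(__file__).parent / "models_info.json"
print(config_path)             # always points alongside this .py file
# read_json(str(config_path))  # str() keeps str-only file APIs happy
```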
| {"golden_diff": "diff --git a/telegram_utils/telegram_ui.py b/telegram_utils/telegram_ui.py\n--- a/telegram_utils/telegram_ui.py\n+++ b/telegram_utils/telegram_ui.py\n@@ -13,6 +13,8 @@\n See the License for the specific language governing permissions and\n limitations under the License.\n \"\"\"\n+from pathlib import Path\n+\n import telebot\n \n from deeppavlov.core.common.file import read_json\n@@ -23,7 +25,8 @@\n bot = telebot.TeleBot(token)\n \n model_name = type(model).__name__\n- models_info = read_json('../telegram_utils/models_info.json')\n+ config_path = Path(__file__).parent / 'models_info.json'\n+ models_info = read_json(str(config_path))\n model_info = models_info[model_name] if model_name in models_info else models_info['@default']\n \n @bot.message_handler(commands=['start'])\n", "issue": "Downloading requirements\nI was trying to install deeppavlov and had a problem following the installation steps.\r\n\r\n1) There is no download.py file in root folder, it is in `deeppavlov/download.py`\r\n``` sh\r\npython download.py [-all] \r\n```\r\n\r\n2) Even if I use that file it outputs the error:\r\n``` sh\r\n(env) root@mysexyhost:~/work/ipavlov/DeepPavlov# python3 deeppavlov/download.py\r\n/home/ubuntu/work/ipavlov/env/local/lib/python3.5/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\r\n from ._conv import register_converters as _register_converters\r\nUsing TensorFlow backend.\r\n2018-03-12 07:34:11.490 ERROR in 'deeppavlov.core.models.serializable'['log'] at line 54: LOGGER ERROR: Can not initialise deeppavlov.core.models.serializable logger, logging to the stderr. Error traceback:\r\nTraceback (most recent call last):\r\n File \"/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/core/common/log.py\", line 32, in get_logger\r\n with open(log_config_path) as log_config_json:\r\nTypeError: invalid file: PosixPath('/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/log_config.json')\r\n2018-03-12 07:34:11.491 ERROR in 'deeppavlov.core.models.keras_model'['log'] at line 54: LOGGER ERROR: Can not initialise deeppavlov.core.models.keras_model logger, logging to the stderr. 
Error traceback:\r\nTraceback (most recent call last):\r\n File \"/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/core/common/log.py\", line 32, in get_logger\r\n with open(log_config_path) as log_config_json:\r\nTypeError: invalid file: PosixPath('/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/log_config.json')\r\nTraceback (most recent call last):\r\n File \"deeppavlov/download.py\", line 24, in <module>\r\n from deeppavlov.core.data.utils import download, download_decompress\r\n File \"/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/__init__.py\", line 1, in <module>\r\n import deeppavlov.core.models.keras_model\r\n File \"/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/core/models/keras_model.py\", line 39, in <module>\r\n class KerasModel(NNModel, metaclass=TfModelMeta):\r\n File \"/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/core/models/keras_model.py\", line 143, in KerasModel\r\n sample_weight_mode=None, weighted_metrics=None, target_tensors=None):\r\n File \"/home/ubuntu/work/ipavlov/env/local/lib/python3.5/site-packages/overrides/overrides.py\", line 70, in overrides\r\n method.__name__)\r\nAssertionError: No super class method found for \"load\"\r\n```\n", "before_files": [{"content": "\"\"\"\nCopyright 2017 Neural Networks and Deep Learning lab, MIPT\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport telebot\n\nfrom deeppavlov.core.common.file import read_json\nfrom deeppavlov.core.commands.infer import build_model_from_config\n\n\ndef init_bot_for_model(token, model):\n bot = telebot.TeleBot(token)\n\n model_name = type(model).__name__\n models_info = read_json('../telegram_utils/models_info.json')\n model_info = models_info[model_name] if model_name in models_info else models_info['@default']\n\n @bot.message_handler(commands=['start'])\n def send_start_message(message):\n chat_id = message.chat.id\n out_message = model_info['start_message']\n if hasattr(model, 'reset'):\n model.reset()\n bot.send_message(chat_id, out_message)\n\n @bot.message_handler(commands=['help'])\n def send_help_message(message):\n chat_id = message.chat.id\n out_message = model_info['help_message']\n bot.send_message(chat_id, out_message)\n\n @bot.message_handler()\n def handle_inference(message):\n chat_id = message.chat.id\n context = message.text\n\n pred = model([context])\n reply_message = str(pred[0])\n bot.send_message(chat_id, reply_message)\n\n bot.polling()\n\n\ndef interact_model_by_telegram(config_path, token):\n config = read_json(config_path)\n model = build_model_from_config(config)\n init_bot_for_model(token, model)\n", "path": "telegram_utils/telegram_ui.py"}], "after_files": [{"content": "\"\"\"\nCopyright 2017 Neural Networks and Deep Learning lab, MIPT\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is 
distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nfrom pathlib import Path\n\nimport telebot\n\nfrom deeppavlov.core.common.file import read_json\nfrom deeppavlov.core.commands.infer import build_model_from_config\n\n\ndef init_bot_for_model(token, model):\n bot = telebot.TeleBot(token)\n\n model_name = type(model).__name__\n config_path = Path(__file__).parent / 'models_info.json'\n models_info = read_json(str(config_path))\n model_info = models_info[model_name] if model_name in models_info else models_info['@default']\n\n @bot.message_handler(commands=['start'])\n def send_start_message(message):\n chat_id = message.chat.id\n out_message = model_info['start_message']\n if hasattr(model, 'reset'):\n model.reset()\n bot.send_message(chat_id, out_message)\n\n @bot.message_handler(commands=['help'])\n def send_help_message(message):\n chat_id = message.chat.id\n out_message = model_info['help_message']\n bot.send_message(chat_id, out_message)\n\n @bot.message_handler()\n def handle_inference(message):\n chat_id = message.chat.id\n context = message.text\n\n pred = model([context])\n reply_message = str(pred[0])\n bot.send_message(chat_id, reply_message)\n\n bot.polling()\n\n\ndef interact_model_by_telegram(config_path, token):\n config = read_json(config_path)\n model = build_model_from_config(config)\n init_bot_for_model(token, model)\n", "path": "telegram_utils/telegram_ui.py"}]} | 1,529 | 196 |
gh_patches_debug_19897 | rasdani/github-patches | git_diff | ARM-DOE__ACT-756 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sunset Stamen maps in GeoDisplay and potentially replace
Stamen is transitioning their maps to stadia at the end of October 2023. ACT will need to deprecate that feature in GeoDisplay and potentially look for replacements.
https://github.com/SciTools/cartopy/pull/2266
--- END ISSUE ---
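An editorial aside, not part of the issue or the ACT code below: replacing the Stamen background mostly means constructing a different `cartopy.io.img_tiles` tiler and passing it to `ax.add_image()`. The sketch uses OpenStreetMap tiles as a stand-in provider; the Stadia interface added in the linked cartopy PR would slot in the same way. Coordinates and zoom level are arbitrary examples.
```python
import matplotlib.pyplot as plt
from cartopy.io import img_tiles

tiler = img_tiles.OSM()                     # any non-Stamen tile provider
ax = plt.axes(projection=tiler.crs)
ax.set_extent([-100.0, -95.0, 34.0, 38.0])  # lon_min, lon_max, lat_min, lat_max
ax.add_image(tiler, 8)                      # zoom level, what geoplot() calls `tile`
plt.show()
```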
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `act/plotting/geodisplay.py`
Content:
```
1 """
2 Stores the class for GeographicPlotDisplay.
3
4 """
5
6 import warnings
7
8 import matplotlib
9 import matplotlib.pyplot as plt
10 import numpy as np
11 import pandas as pd
12
13 from .plot import Display
14
15 try:
16 import cartopy.crs as ccrs
17 import cartopy.feature as cfeature
18 from cartopy.io import img_tiles
19
20 CARTOPY_AVAILABLE = True
21 except ImportError:
22 CARTOPY_AVAILABLE = False
23
24
25 class GeographicPlotDisplay(Display):
26 """
27 A class for making geographic tracer plot of aircraft, ship or other moving
28 platform plot.
29
30 This is inherited from the :func:`act.plotting.Display`
31 class and has therefore has the same attributes as that class.
32 See :func:`act.plotting.Display`
33 for more information. There are no additional attributes or parameters
34 to this class.
35
36 In order to create geographic plots, ACT needs the Cartopy package to be
37 installed on your system. More information about
38 Cartopy go here:https://scitools.org.uk/cartopy/docs/latest/ .
39
40 """
41
42 def __init__(self, ds, ds_name=None, **kwargs):
43 if not CARTOPY_AVAILABLE:
44 raise ImportError(
45 'Cartopy needs to be installed on your ' 'system to make geographic display plots.'
46 )
47 super().__init__(ds, ds_name, secondary_y_allowed=False, **kwargs)
48 if self.fig is None:
49 self.fig = plt.figure(**kwargs)
50
51 def geoplot(
52 self,
53 data_field=None,
54 lat_field='lat',
55 lon_field='lon',
56 dsname=None,
57 cbar_label=None,
58 title=None,
59 projection=None,
60 plot_buffer=0.08,
61 img_tile=None,
62 img_tile_args={},
63 tile=8,
64 stamen='terrain-background',
65 cartopy_feature=None,
66 cmap='rainbow',
67 text=None,
68 gridlines=True,
69 **kwargs,
70 ):
71 """
72 Creates a latitude and longitude plot of a time series data set with
73 data values indicated by color and described with a colorbar.
74 Latitude values must be in degree north (-90 to 90) and
75 longitude must be in degree east (-180 to 180).
76
77 Parameters
78 ----------
79 data_field : str
80 Name of data field in the dataset to plot.
81 lat_field : str
82 Name of latitude field in the dataset to use.
83 lon_field : str
84 Name of longitude field in the dataset to use.
85 dsname : str or None
86 The name of the datastream to plot. Set to None to make ACT
87 attempt to automatically determine this.
88 cbar_label : str
89 Label to use with colorbar. If set to None will attempt
90 to create label from long_name and units.
91 title : str
92 Plot title.
93 projection : cartopy.crs object
94 Project to use on plot. See
95 https://scitools.org.uk/cartopy/docs/latest/reference/projections.html?highlight=projections
96 plot_buffer : float
97 Buffer to add around data on plot in lat and lon dimension.
98 img_tile : str
99 Image to use for the plot background. Set to None to not use
100 background image. For all image background types, see:
101 https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html
102 Default is None.
103 img_tile_args : dict
104 Keyword arguments for the chosen img_tile. These arguments can be
105 found for the corresponding img_tile here:
106 https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html
107 Default is an empty dictionary.
108 tile : int
109 Tile zoom to use with background image. Higher number indicates
110 more resolution. A value of 8 is typical for a normal sonde plot.
111 cartopy_feature : list of str or str
112 Cartopy feature to add to plot.
113 cmap : str
114 Color map to use for colorbar.
115 text : dictionary
116 Dictionary of {text:[lon,lat]} to add to plot. Can have more
117 than one set of text to add.
118 gridlines : boolean
119 Use latitude and longitude gridlines.
120 **kwargs : keyword arguments
121 Any other keyword arguments that will be passed
122 into :func:`matplotlib.pyplot.scatter` when the figure
123 is made. See the matplotlib documentation for further details
124 on what keyword arguments are available.
125
126 """
127 if dsname is None and len(self._ds.keys()) > 1:
128 raise ValueError(
129 'You must choose a datastream when there are 2 '
130 'or more datasets in the GeographicPlotDisplay '
131 'object.'
132 )
133 elif dsname is None:
134 dsname = list(self._ds.keys())[0]
135
136 if data_field is None:
137 raise ValueError('You must enter the name of the data ' 'to be plotted.')
138
139 if projection is None:
140 if CARTOPY_AVAILABLE:
141 projection = ccrs.PlateCarree()
142
143 # Extract data from the dataset
144 try:
145 lat = self._ds[dsname][lat_field].values
146 except KeyError:
147 raise ValueError(
148 (
149 'You will need to provide the name of the '
150 "field if not '{}' to use for latitude "
151 'data.'
152 ).format(lat_field)
153 )
154 try:
155 lon = self._ds[dsname][lon_field].values
156 except KeyError:
157 raise ValueError(
158 (
159 'You will need to provide the name of the '
160 "field if not '{}' to use for longitude "
161 'data.'
162 ).format(lon_field)
163 )
164
165 # Set up metadata information for display on plot
166 if cbar_label is None:
167 try:
168 cbar_label = (
169 self._ds[dsname][data_field].attrs['long_name']
170 + ' ('
171 + self._ds[dsname][data_field].attrs['units']
172 + ')'
173 )
174 except KeyError:
175 cbar_label = data_field
176
177 lat_limits = [np.nanmin(lat), np.nanmax(lat)]
178 lon_limits = [np.nanmin(lon), np.nanmax(lon)]
179 box_size = np.max([np.abs(np.diff(lat_limits)), np.abs(np.diff(lon_limits))])
180 bx_buf = box_size * plot_buffer
181
182 lat_center = np.sum(lat_limits) / 2.0
183 lon_center = np.sum(lon_limits) / 2.0
184
185 lat_limits = [
186 lat_center - box_size / 2.0 - bx_buf,
187 lat_center + box_size / 2.0 + bx_buf,
188 ]
189 lon_limits = [
190 lon_center - box_size / 2.0 - bx_buf,
191 lon_center + box_size / 2.0 + bx_buf,
192 ]
193
194 data = self._ds[dsname][data_field].values
195
196 # Create base plot projection
197 ax = plt.axes(projection=projection)
198 plt.subplots_adjust(left=0.01, right=0.99, bottom=0.05, top=0.93)
199 ax.set_extent([lon_limits[0], lon_limits[1], lat_limits[0], lat_limits[1]], crs=projection)
200
201 if title is None:
202 try:
203 dim = list(self._ds[dsname][data_field].dims)
204 ts = pd.to_datetime(str(self._ds[dsname][dim[0]].values[0]))
205 date = ts.strftime('%Y-%m-%d')
206 time_str = ts.strftime('%H:%M:%S')
207 plt.title(' '.join([dsname, 'at', date, time_str]))
208 except NameError:
209 plt.title(dsname)
210 else:
211 plt.title(title)
212
213 if stamen and img_tile is None:
214 tiler = img_tiles.Stamen(stamen)
215 ax.add_image(tiler, tile)
216 warnings.warn(
217 "Stamen is deprecated in Cartopy and in future versions of ACT, "
218 "please use img_tile to specify the image background. ")
219 else:
220 if img_tile is not None:
221 tiler = getattr(img_tiles, img_tile)(**img_tile_args)
222 ax.add_image(tiler, tile)
223
224 colorbar_map = None
225 if cmap is not None:
226 colorbar_map = matplotlib.colormaps.get_cmap(cmap)
227 sc = ax.scatter(lon, lat, c=data, cmap=colorbar_map, **kwargs)
228 cbar = plt.colorbar(sc)
229 cbar.ax.set_ylabel(cbar_label)
230 if cartopy_feature is not None:
231 if isinstance(cartopy_feature, str):
232 cartopy_feature = [cartopy_feature]
233 cartopy_feature = [ii.upper() for ii in cartopy_feature]
234 if 'STATES' in cartopy_feature:
235 ax.add_feature(cfeature.STATES.with_scale('10m'))
236 if 'LAND' in cartopy_feature:
237 ax.add_feature(cfeature.LAND)
238 if 'OCEAN' in cartopy_feature:
239 ax.add_feature(cfeature.OCEAN)
240 if 'COASTLINE' in cartopy_feature:
241 ax.add_feature(cfeature.COASTLINE)
242 if 'BORDERS' in cartopy_feature:
243 ax.add_feature(cfeature.BORDERS, linestyle=':')
244 if 'LAKES' in cartopy_feature:
245 ax.add_feature(cfeature.LAKES, alpha=0.5)
246 if 'RIVERS' in cartopy_feature:
247 ax.add_feature(cfeature.RIVERS)
248 if text is not None:
249 for label, location in text.items():
250 ax.plot(location[0], location[1], marker='*', color='black')
251 ax.text(location[0], location[1], label, color='black')
252
253 if gridlines:
254 if projection == ccrs.PlateCarree() or projection == ccrs.Mercator:
255 gl = ax.gridlines(
256 crs=projection,
257 draw_labels=True,
258 linewidth=1,
259 color='gray',
260 alpha=0.5,
261 linestyle='--',
262 )
263 gl.top_labels = False
264 gl.left_labels = True
265 gl.bottom_labels = True
266 gl.right_labels = False
267 gl.xlabel_style = {'size': 6, 'color': 'gray'}
268 gl.ylabel_style = {'size': 6, 'color': 'gray'}
269 else:
270 # Labels are only currently supported for PlateCarree and Mercator
271 gl = ax.gridlines(
272 draw_labels=False,
273 linewidth=1,
274 color='gray',
275 alpha=0.5,
276 linestyle='--',
277 )
278
279 return ax
280
```
--- END FILES ---
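For orientation, a toy sketch of how the class above is typically driven once the Stamen path is gone. The dataset is fabricated, and the import path assumes ACT exposes the class as `act.plotting.GeographicPlotDisplay`; only the `img_tile`/`tile` arguments matter here:

```python
import numpy as np
import xarray as xr
from act.plotting import GeographicPlotDisplay

# Fabricated moving-platform data; geoplot() only needs lat/lon plus one field.
times = np.array(['2023-10-01T00:00', '2023-10-01T01:00'], dtype='datetime64[ns]')
ds = xr.Dataset(
    {
        'temperature': ('time', [21.4, 22.1]),
        'lat': ('time', [36.6, 36.8]),
        'lon': ('time', [-97.5, -97.3]),
    },
    coords={'time': times},
)

display = GeographicPlotDisplay(ds, figsize=(10, 6))
# The string is resolved as getattr(img_tiles, 'OSM')() inside geoplot().
ax = display.geoplot('temperature', img_tile='OSM', tile=8)
```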
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/act/plotting/geodisplay.py b/act/plotting/geodisplay.py
--- a/act/plotting/geodisplay.py
+++ b/act/plotting/geodisplay.py
@@ -61,7 +61,6 @@
img_tile=None,
img_tile_args={},
tile=8,
- stamen='terrain-background',
cartopy_feature=None,
cmap='rainbow',
text=None,
@@ -210,16 +209,9 @@
else:
plt.title(title)
- if stamen and img_tile is None:
- tiler = img_tiles.Stamen(stamen)
+ if img_tile is not None:
+ tiler = getattr(img_tiles, img_tile)(**img_tile_args)
ax.add_image(tiler, tile)
- warnings.warn(
- "Stamen is deprecated in Cartopy and in future versions of ACT, "
- "please use img_tile to specify the image background. ")
- else:
- if img_tile is not None:
- tiler = getattr(img_tiles, img_tile)(**img_tile_args)
- ax.add_image(tiler, tile)
 
     colorbar_map = None
if cmap is not None:
| {"golden_diff": "diff --git a/act/plotting/geodisplay.py b/act/plotting/geodisplay.py\n--- a/act/plotting/geodisplay.py\n+++ b/act/plotting/geodisplay.py\n@@ -61,7 +61,6 @@\n img_tile=None,\n img_tile_args={},\n tile=8,\n- stamen='terrain-background',\n cartopy_feature=None,\n cmap='rainbow',\n text=None,\n@@ -210,16 +209,9 @@\n else:\n plt.title(title)\n \n- if stamen and img_tile is None:\n- tiler = img_tiles.Stamen(stamen)\n+ if img_tile is not None:\n+ tiler = getattr(img_tiles, img_tile)(**img_tile_args)\n ax.add_image(tiler, tile)\n- warnings.warn(\n- \"Stamen is deprecated in Cartopy and in future versions of ACT, \"\n- \"please use img_tile to specify the image background. \")\n- else:\n- if img_tile is not None:\n- tiler = getattr(img_tiles, img_tile)(**img_tile_args)\n- ax.add_image(tiler, tile)\n \n colorbar_map = None\n if cmap is not None:\n", "issue": "Sunset Stamen maps in GeoDisplay and potentially replace\nStamen is transitioning their maps to stadia at the end of October 2023. ACT will need to deprecate that feature in GeoDisplay and potentially look for replacements.\r\n\r\nhttps://github.com/SciTools/cartopy/pull/2266 \n", "before_files": [{"content": "\"\"\"\nStores the class for GeographicPlotDisplay.\n\n\"\"\"\n\nimport warnings\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom .plot import Display\n\ntry:\n import cartopy.crs as ccrs\n import cartopy.feature as cfeature\n from cartopy.io import img_tiles\n\n CARTOPY_AVAILABLE = True\nexcept ImportError:\n CARTOPY_AVAILABLE = False\n\n\nclass GeographicPlotDisplay(Display):\n \"\"\"\n A class for making geographic tracer plot of aircraft, ship or other moving\n platform plot.\n\n This is inherited from the :func:`act.plotting.Display`\n class and has therefore has the same attributes as that class.\n See :func:`act.plotting.Display`\n for more information. There are no additional attributes or parameters\n to this class.\n\n In order to create geographic plots, ACT needs the Cartopy package to be\n installed on your system. More information about\n Cartopy go here:https://scitools.org.uk/cartopy/docs/latest/ .\n\n \"\"\"\n\n def __init__(self, ds, ds_name=None, **kwargs):\n if not CARTOPY_AVAILABLE:\n raise ImportError(\n 'Cartopy needs to be installed on your ' 'system to make geographic display plots.'\n )\n super().__init__(ds, ds_name, secondary_y_allowed=False, **kwargs)\n if self.fig is None:\n self.fig = plt.figure(**kwargs)\n\n def geoplot(\n self,\n data_field=None,\n lat_field='lat',\n lon_field='lon',\n dsname=None,\n cbar_label=None,\n title=None,\n projection=None,\n plot_buffer=0.08,\n img_tile=None,\n img_tile_args={},\n tile=8,\n stamen='terrain-background',\n cartopy_feature=None,\n cmap='rainbow',\n text=None,\n gridlines=True,\n **kwargs,\n ):\n \"\"\"\n Creates a latitude and longitude plot of a time series data set with\n data values indicated by color and described with a colorbar.\n Latitude values must be in degree north (-90 to 90) and\n longitude must be in degree east (-180 to 180).\n\n Parameters\n ----------\n data_field : str\n Name of data field in the dataset to plot.\n lat_field : str\n Name of latitude field in the dataset to use.\n lon_field : str\n Name of longitude field in the dataset to use.\n dsname : str or None\n The name of the datastream to plot. Set to None to make ACT\n attempt to automatically determine this.\n cbar_label : str\n Label to use with colorbar. 
If set to None will attempt\n to create label from long_name and units.\n title : str\n Plot title.\n projection : cartopy.crs object\n Project to use on plot. See\n https://scitools.org.uk/cartopy/docs/latest/reference/projections.html?highlight=projections\n plot_buffer : float\n Buffer to add around data on plot in lat and lon dimension.\n img_tile : str\n Image to use for the plot background. Set to None to not use\n background image. For all image background types, see:\n https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html\n Default is None.\n img_tile_args : dict\n Keyword arguments for the chosen img_tile. These arguments can be\n found for the corresponding img_tile here:\n https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html\n Default is an empty dictionary.\n tile : int\n Tile zoom to use with background image. Higher number indicates\n more resolution. A value of 8 is typical for a normal sonde plot.\n cartopy_feature : list of str or str\n Cartopy feature to add to plot.\n cmap : str\n Color map to use for colorbar.\n text : dictionary\n Dictionary of {text:[lon,lat]} to add to plot. Can have more\n than one set of text to add.\n gridlines : boolean\n Use latitude and longitude gridlines.\n **kwargs : keyword arguments\n Any other keyword arguments that will be passed\n into :func:`matplotlib.pyplot.scatter` when the figure\n is made. See the matplotlib documentation for further details\n on what keyword arguments are available.\n\n \"\"\"\n if dsname is None and len(self._ds.keys()) > 1:\n raise ValueError(\n 'You must choose a datastream when there are 2 '\n 'or more datasets in the GeographicPlotDisplay '\n 'object.'\n )\n elif dsname is None:\n dsname = list(self._ds.keys())[0]\n\n if data_field is None:\n raise ValueError('You must enter the name of the data ' 'to be plotted.')\n\n if projection is None:\n if CARTOPY_AVAILABLE:\n projection = ccrs.PlateCarree()\n\n # Extract data from the dataset\n try:\n lat = self._ds[dsname][lat_field].values\n except KeyError:\n raise ValueError(\n (\n 'You will need to provide the name of the '\n \"field if not '{}' to use for latitude \"\n 'data.'\n ).format(lat_field)\n )\n try:\n lon = self._ds[dsname][lon_field].values\n except KeyError:\n raise ValueError(\n (\n 'You will need to provide the name of the '\n \"field if not '{}' to use for longitude \"\n 'data.'\n ).format(lon_field)\n )\n\n # Set up metadata information for display on plot\n if cbar_label is None:\n try:\n cbar_label = (\n self._ds[dsname][data_field].attrs['long_name']\n + ' ('\n + self._ds[dsname][data_field].attrs['units']\n + ')'\n )\n except KeyError:\n cbar_label = data_field\n\n lat_limits = [np.nanmin(lat), np.nanmax(lat)]\n lon_limits = [np.nanmin(lon), np.nanmax(lon)]\n box_size = np.max([np.abs(np.diff(lat_limits)), np.abs(np.diff(lon_limits))])\n bx_buf = box_size * plot_buffer\n\n lat_center = np.sum(lat_limits) / 2.0\n lon_center = np.sum(lon_limits) / 2.0\n\n lat_limits = [\n lat_center - box_size / 2.0 - bx_buf,\n lat_center + box_size / 2.0 + bx_buf,\n ]\n lon_limits = [\n lon_center - box_size / 2.0 - bx_buf,\n lon_center + box_size / 2.0 + bx_buf,\n ]\n\n data = self._ds[dsname][data_field].values\n\n # Create base plot projection\n ax = plt.axes(projection=projection)\n plt.subplots_adjust(left=0.01, right=0.99, bottom=0.05, top=0.93)\n ax.set_extent([lon_limits[0], lon_limits[1], lat_limits[0], lat_limits[1]], crs=projection)\n\n if title is None:\n try:\n dim = list(self._ds[dsname][data_field].dims)\n ts = 
pd.to_datetime(str(self._ds[dsname][dim[0]].values[0]))\n date = ts.strftime('%Y-%m-%d')\n time_str = ts.strftime('%H:%M:%S')\n plt.title(' '.join([dsname, 'at', date, time_str]))\n except NameError:\n plt.title(dsname)\n else:\n plt.title(title)\n\n if stamen and img_tile is None:\n tiler = img_tiles.Stamen(stamen)\n ax.add_image(tiler, tile)\n warnings.warn(\n \"Stamen is deprecated in Cartopy and in future versions of ACT, \"\n \"please use img_tile to specify the image background. \")\n else:\n if img_tile is not None:\n tiler = getattr(img_tiles, img_tile)(**img_tile_args)\n ax.add_image(tiler, tile)\n\n colorbar_map = None\n if cmap is not None:\n colorbar_map = matplotlib.colormaps.get_cmap(cmap)\n sc = ax.scatter(lon, lat, c=data, cmap=colorbar_map, **kwargs)\n cbar = plt.colorbar(sc)\n cbar.ax.set_ylabel(cbar_label)\n if cartopy_feature is not None:\n if isinstance(cartopy_feature, str):\n cartopy_feature = [cartopy_feature]\n cartopy_feature = [ii.upper() for ii in cartopy_feature]\n if 'STATES' in cartopy_feature:\n ax.add_feature(cfeature.STATES.with_scale('10m'))\n if 'LAND' in cartopy_feature:\n ax.add_feature(cfeature.LAND)\n if 'OCEAN' in cartopy_feature:\n ax.add_feature(cfeature.OCEAN)\n if 'COASTLINE' in cartopy_feature:\n ax.add_feature(cfeature.COASTLINE)\n if 'BORDERS' in cartopy_feature:\n ax.add_feature(cfeature.BORDERS, linestyle=':')\n if 'LAKES' in cartopy_feature:\n ax.add_feature(cfeature.LAKES, alpha=0.5)\n if 'RIVERS' in cartopy_feature:\n ax.add_feature(cfeature.RIVERS)\n if text is not None:\n for label, location in text.items():\n ax.plot(location[0], location[1], marker='*', color='black')\n ax.text(location[0], location[1], label, color='black')\n\n if gridlines:\n if projection == ccrs.PlateCarree() or projection == ccrs.Mercator:\n gl = ax.gridlines(\n crs=projection,\n draw_labels=True,\n linewidth=1,\n color='gray',\n alpha=0.5,\n linestyle='--',\n )\n gl.top_labels = False\n gl.left_labels = True\n gl.bottom_labels = True\n gl.right_labels = False\n gl.xlabel_style = {'size': 6, 'color': 'gray'}\n gl.ylabel_style = {'size': 6, 'color': 'gray'}\n else:\n # Labels are only currently supported for PlateCarree and Mercator\n gl = ax.gridlines(\n draw_labels=False,\n linewidth=1,\n color='gray',\n alpha=0.5,\n linestyle='--',\n )\n\n return ax\n", "path": "act/plotting/geodisplay.py"}], "after_files": [{"content": "\"\"\"\nStores the class for GeographicPlotDisplay.\n\n\"\"\"\n\nimport warnings\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom .plot import Display\n\ntry:\n import cartopy.crs as ccrs\n import cartopy.feature as cfeature\n from cartopy.io import img_tiles\n\n CARTOPY_AVAILABLE = True\nexcept ImportError:\n CARTOPY_AVAILABLE = False\n\n\nclass GeographicPlotDisplay(Display):\n \"\"\"\n A class for making geographic tracer plot of aircraft, ship or other moving\n platform plot.\n\n This is inherited from the :func:`act.plotting.Display`\n class and has therefore has the same attributes as that class.\n See :func:`act.plotting.Display`\n for more information. There are no additional attributes or parameters\n to this class.\n\n In order to create geographic plots, ACT needs the Cartopy package to be\n installed on your system. 
More information about\n Cartopy go here:https://scitools.org.uk/cartopy/docs/latest/ .\n\n \"\"\"\n\n def __init__(self, ds, ds_name=None, **kwargs):\n if not CARTOPY_AVAILABLE:\n raise ImportError(\n 'Cartopy needs to be installed on your ' 'system to make geographic display plots.'\n )\n super().__init__(ds, ds_name, **kwargs)\n if self.fig is None:\n self.fig = plt.figure(**kwargs)\n\n def geoplot(\n self,\n data_field=None,\n lat_field='lat',\n lon_field='lon',\n dsname=None,\n cbar_label=None,\n title=None,\n projection=None,\n plot_buffer=0.08,\n img_tile=None,\n img_tile_args={},\n tile=8,\n cartopy_feature=None,\n cmap='rainbow',\n text=None,\n gridlines=True,\n **kwargs,\n ):\n \"\"\"\n Creates a latitude and longitude plot of a time series data set with\n data values indicated by color and described with a colorbar.\n Latitude values must be in degree north (-90 to 90) and\n longitude must be in degree east (-180 to 180).\n\n Parameters\n ----------\n data_field : str\n Name of data field in the dataset to plot.\n lat_field : str\n Name of latitude field in the dataset to use.\n lon_field : str\n Name of longitude field in the dataset to use.\n dsname : str or None\n The name of the datastream to plot. Set to None to make ACT\n attempt to automatically determine this.\n cbar_label : str\n Label to use with colorbar. If set to None will attempt\n to create label from long_name and units.\n title : str\n Plot title.\n projection : cartopy.crs object\n Project to use on plot. See\n https://scitools.org.uk/cartopy/docs/latest/reference/projections.html?highlight=projections\n plot_buffer : float\n Buffer to add around data on plot in lat and lon dimension.\n img_tile : str\n Image to use for the plot background. Set to None to not use\n background image. For all image background types, see:\n https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html\n Default is None.\n img_tile_args : dict\n Keyword arguments for the chosen img_tile. These arguments can be\n found for the corresponding img_tile here:\n https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html\n Default is an empty dictionary.\n tile : int\n Tile zoom to use with background image. Higher number indicates\n more resolution. A value of 8 is typical for a normal sonde plot.\n cartopy_feature : list of str or str\n Cartopy feature to add to plot.\n cmap : str\n Color map to use for colorbar.\n text : dictionary\n Dictionary of {text:[lon,lat]} to add to plot. Can have more\n than one set of text to add.\n gridlines : boolean\n Use latitude and longitude gridlines.\n **kwargs : keyword arguments\n Any other keyword arguments that will be passed\n into :func:`matplotlib.pyplot.scatter` when the figure\n is made. 
See the matplotlib documentation for further details\n on what keyword arguments are available.\n\n \"\"\"\n if dsname is None and len(self._ds.keys()) > 1:\n raise ValueError(\n 'You must choose a datastream when there are 2 '\n 'or more datasets in the GeographicPlotDisplay '\n 'object.'\n )\n elif dsname is None:\n dsname = list(self._ds.keys())[0]\n\n if data_field is None:\n raise ValueError('You must enter the name of the data ' 'to be plotted.')\n\n if projection is None:\n if CARTOPY_AVAILABLE:\n projection = ccrs.PlateCarree()\n\n # Extract data from the dataset\n try:\n lat = self._ds[dsname][lat_field].values\n except KeyError:\n raise ValueError(\n (\n 'You will need to provide the name of the '\n \"field if not '{}' to use for latitude \"\n 'data.'\n ).format(lat_field)\n )\n try:\n lon = self._ds[dsname][lon_field].values\n except KeyError:\n raise ValueError(\n (\n 'You will need to provide the name of the '\n \"field if not '{}' to use for longitude \"\n 'data.'\n ).format(lon_field)\n )\n\n # Set up metadata information for display on plot\n if cbar_label is None:\n try:\n cbar_label = (\n self._ds[dsname][data_field].attrs['long_name']\n + ' ('\n + self._ds[dsname][data_field].attrs['units']\n + ')'\n )\n except KeyError:\n cbar_label = data_field\n\n lat_limits = [np.nanmin(lat), np.nanmax(lat)]\n lon_limits = [np.nanmin(lon), np.nanmax(lon)]\n box_size = np.max([np.abs(np.diff(lat_limits)), np.abs(np.diff(lon_limits))])\n bx_buf = box_size * plot_buffer\n\n lat_center = np.sum(lat_limits) / 2.0\n lon_center = np.sum(lon_limits) / 2.0\n\n lat_limits = [\n lat_center - box_size / 2.0 - bx_buf,\n lat_center + box_size / 2.0 + bx_buf,\n ]\n lon_limits = [\n lon_center - box_size / 2.0 - bx_buf,\n lon_center + box_size / 2.0 + bx_buf,\n ]\n\n data = self._ds[dsname][data_field].values\n\n # Create base plot projection\n ax = plt.axes(projection=projection)\n plt.subplots_adjust(left=0.01, right=0.99, bottom=0.05, top=0.93)\n ax.set_extent([lon_limits[0], lon_limits[1], lat_limits[0], lat_limits[1]], crs=projection)\n\n if title is None:\n try:\n dim = list(self._ds[dsname][data_field].dims)\n ts = pd.to_datetime(str(self._ds[dsname][dim[0]].values[0]))\n date = ts.strftime('%Y-%m-%d')\n time_str = ts.strftime('%H:%M:%S')\n plt.title(' '.join([dsname, 'at', date, time_str]))\n except NameError:\n plt.title(dsname)\n else:\n plt.title(title)\n\n if img_tile is not None:\n tiler = getattr(img_tiles, img_tile)(**img_tile_args)\n ax.add_image(tiler, tile)\n\n colorbar_map = None\n if cmap is not None:\n colorbar_map = matplotlib.colormaps.get_cmap(cmap)\n sc = ax.scatter(lon, lat, c=data, cmap=colorbar_map, **kwargs)\n cbar = plt.colorbar(sc)\n cbar.ax.set_ylabel(cbar_label)\n if cartopy_feature is not None:\n if isinstance(cartopy_feature, str):\n cartopy_feature = [cartopy_feature]\n cartopy_feature = [ii.upper() for ii in cartopy_feature]\n if 'STATES' in cartopy_feature:\n ax.add_feature(cfeature.STATES.with_scale('10m'))\n if 'LAND' in cartopy_feature:\n ax.add_feature(cfeature.LAND)\n if 'OCEAN' in cartopy_feature:\n ax.add_feature(cfeature.OCEAN)\n if 'COASTLINE' in cartopy_feature:\n ax.add_feature(cfeature.COASTLINE)\n if 'BORDERS' in cartopy_feature:\n ax.add_feature(cfeature.BORDERS, linestyle=':')\n if 'LAKES' in cartopy_feature:\n ax.add_feature(cfeature.LAKES, alpha=0.5)\n if 'RIVERS' in cartopy_feature:\n ax.add_feature(cfeature.RIVERS)\n if text is not None:\n for label, location in text.items():\n ax.plot(location[0], location[1], marker='*', color='black')\n 
ax.text(location[0], location[1], label, color='black')\n\n if gridlines:\n if projection == ccrs.PlateCarree() or projection == ccrs.Mercator:\n gl = ax.gridlines(\n crs=projection,\n draw_labels=True,\n linewidth=1,\n color='gray',\n alpha=0.5,\n linestyle='--',\n )\n gl.top_labels = False\n gl.left_labels = True\n gl.bottom_labels = True\n gl.right_labels = False\n gl.xlabel_style = {'size': 6, 'color': 'gray'}\n gl.ylabel_style = {'size': 6, 'color': 'gray'}\n else:\n # Labels are only currently supported for PlateCarree and Mercator\n gl = ax.gridlines(\n draw_labels=False,\n linewidth=1,\n color='gray',\n alpha=0.5,\n linestyle='--',\n )\n\n return ax\n", "path": "act/plotting/geodisplay.py"}]} | 3,356 | 274 |
gh_patches_debug_38229 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-contrib-1018 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
elasticsearch span_name is not generated correctly
**Steps to reproduce**
Describe exactly how to reproduce the error. Include a code sample if applicable.
**What is the actual behavior?**
Our indices are created on a rolling basis, so each query targets a specific time window and therefore hits several date-stamped indices at once.
The instrumentation copies the full comma-separated index list into span_name, so the span name no longer describes a single operation. For example:
```
Elasticsearch/bkfta_action_20220228_read,bkfta_action_20220301_read,bkfta_action_20220302_read,bkfta_action_20220303_read,bkfta_action_20220304_read,bkfta_action_20220305_read,bkfta_action_20220306_read,bkfta_action_20220307_read/_search
```
**What is the expected behavior?**
span_name should be
```
Elasticsearch/{index_name}/_search
```
**Additional context**
Add any other context about the problem here.
--- END ISSUE ---
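The usual cure for this kind of cardinality problem is to normalise the URL before it becomes a span name. A small self-contained sketch (the regex, prefix and index names are chosen for illustration):

```python
import re

# Collapse "/<comma-separated indices>/_search" into a fixed placeholder so the
# span name stays low-cardinality; the real index list can be kept as an attribute.
_search_url = re.compile(r"/([^/]+)/_search/?$")

def normalized_span_name(prefix, url):
    match = _search_url.search(url)
    if match is None:
        return prefix + url, None
    return prefix + "/<target>/_search", match.group(1)

name, target = normalized_span_name(
    "Elasticsearch",
    "/bkfta_action_20220301_read,bkfta_action_20220302_read/_search",
)
print(name)    # Elasticsearch/<target>/_search
print(target)  # bkfta_action_20220301_read,bkfta_action_20220302_read
```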
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 This library allows tracing HTTP elasticsearch made by the
17 `elasticsearch <https://elasticsearch-py.readthedocs.io/en/master/>`_ library.
18
19 Usage
20 -----
21
22 .. code-block:: python
23
24 from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor
25 import elasticsearch
26
27
28 # instrument elasticsearch
29 ElasticsearchInstrumentor().instrument()
30
31 # Using elasticsearch as normal now will automatically generate spans
32 es = elasticsearch.Elasticsearch()
33 es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})
34 es.get(index='my-index', doc_type='my-type', id=1)
35
36 Elasticsearch instrumentation prefixes operation names with the string "Elasticsearch". This
37 can be changed to a different string by either setting the `OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX`
38 environment variable or by passing the prefix as an argument to the instrumentor. For example,
39
40
41 .. code-block:: python
42
43 ElasticsearchInstrumentor("my-custom-prefix").instrument()
44
45
46 The `instrument` method accepts the following keyword args:
47
48 tracer_provider (TracerProvider) - an optional tracer provider
49 request_hook (Callable) - a function with extra user-defined logic to be performed before performing the request
50 this function signature is:
51 def request_hook(span: Span, method: str, url: str, kwargs)
52 response_hook (Callable) - a function with extra user-defined logic to be performed after performing the request
53 this function signature is:
54 def response_hook(span: Span, response: dict)
55
56 for example:
57
58 .. code: python
59
60 from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor
61 import elasticsearch
62
63 def request_hook(span, method, url, kwargs):
64 if span and span.is_recording():
65 span.set_attribute("custom_user_attribute_from_request_hook", "some-value")
66
67 def response_hook(span, response):
68 if span and span.is_recording():
69 span.set_attribute("custom_user_attribute_from_response_hook", "some-value")
70
71 # instrument elasticsearch with request and response hooks
72 ElasticsearchInstrumentor().instrument(request_hook=request_hook, response_hook=response_hook)
73
74 # Using elasticsearch as normal now will automatically generate spans,
75 # including user custom attributes added from the hooks
76 es = elasticsearch.Elasticsearch()
77 es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})
78 es.get(index='my-index', doc_type='my-type', id=1)
79
80 API
81 ---
82 """
83
84 import re
85 from logging import getLogger
86 from os import environ
87 from typing import Collection
88
89 import elasticsearch
90 import elasticsearch.exceptions
91 from wrapt import wrap_function_wrapper as _wrap
92
93 from opentelemetry.instrumentation.elasticsearch.package import _instruments
94 from opentelemetry.instrumentation.elasticsearch.version import __version__
95 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
96 from opentelemetry.instrumentation.utils import unwrap
97 from opentelemetry.semconv.trace import SpanAttributes
98 from opentelemetry.trace import SpanKind, get_tracer
99
100 logger = getLogger(__name__)
101
102
103 # Values to add as tags from the actual
104 # payload returned by Elasticsearch, if any.
105 _ATTRIBUTES_FROM_RESULT = [
106 "found",
107 "timed_out",
108 "took",
109 ]
110
111 _DEFAULT_OP_NAME = "request"
112
113
114 class ElasticsearchInstrumentor(BaseInstrumentor):
115 """An instrumentor for elasticsearch
116 See `BaseInstrumentor`
117 """
118
119 def __init__(self, span_name_prefix=None):
120 if not span_name_prefix:
121 span_name_prefix = environ.get(
122 "OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX",
123 "Elasticsearch",
124 )
125 self._span_name_prefix = span_name_prefix.strip()
126 super().__init__()
127
128 def instrumentation_dependencies(self) -> Collection[str]:
129 return _instruments
130
131 def _instrument(self, **kwargs):
132 """
133 Instruments elasticsarch module
134 """
135 tracer_provider = kwargs.get("tracer_provider")
136 tracer = get_tracer(__name__, __version__, tracer_provider)
137 request_hook = kwargs.get("request_hook")
138 response_hook = kwargs.get("response_hook")
139 _wrap(
140 elasticsearch,
141 "Transport.perform_request",
142 _wrap_perform_request(
143 tracer, self._span_name_prefix, request_hook, response_hook
144 ),
145 )
146
147 def _uninstrument(self, **kwargs):
148 unwrap(elasticsearch.Transport, "perform_request")
149
150
151 _regex_doc_url = re.compile(r"/_doc/([^/]+)")
152
153
154 def _wrap_perform_request(
155 tracer, span_name_prefix, request_hook=None, response_hook=None
156 ):
157 # pylint: disable=R0912
158 def wrapper(wrapped, _, args, kwargs):
159 method = url = None
160 try:
161 method, url, *_ = args
162 except IndexError:
163 logger.warning(
164 "expected perform_request to receive two positional arguments. "
165 "Got %d",
166 len(args),
167 )
168
169 op_name = span_name_prefix + (url or method or _DEFAULT_OP_NAME)
170 doc_id = None
171 if url:
172 # TODO: This regex-based solution avoids creating an unbounded number of span names, but should be replaced by instrumenting individual Elasticsearch methods instead of Transport.perform_request()
173 # A limitation of the regex is that only the '_doc' mapping type is supported. Mapping types are deprecated since Elasticsearch 7
174 # https://github.com/open-telemetry/opentelemetry-python-contrib/issues/708
175 match = _regex_doc_url.search(url)
176 if match is not None:
177 # Remove the full document ID from the URL
178 doc_span = match.span()
179 op_name = (
180 span_name_prefix
181 + url[: doc_span[0]]
182 + "/_doc/:id"
183 + url[doc_span[1] :]
184 )
185 # Put the document ID in attributes
186 doc_id = match.group(1)
187 params = kwargs.get("params", {})
188 body = kwargs.get("body", None)
189
190 with tracer.start_as_current_span(
191 op_name,
192 kind=SpanKind.CLIENT,
193 ) as span:
194
195 if callable(request_hook):
196 request_hook(span, method, url, kwargs)
197
198 if span.is_recording():
199 attributes = {
200 SpanAttributes.DB_SYSTEM: "elasticsearch",
201 }
202 if url:
203 attributes["elasticsearch.url"] = url
204 if method:
205 attributes["elasticsearch.method"] = method
206 if body:
207 attributes[SpanAttributes.DB_STATEMENT] = str(body)
208 if params:
209 attributes["elasticsearch.params"] = str(params)
210 if doc_id:
211 attributes["elasticsearch.id"] = doc_id
212 for key, value in attributes.items():
213 span.set_attribute(key, value)
214
215 rv = wrapped(*args, **kwargs)
216 if isinstance(rv, dict) and span.is_recording():
217 for member in _ATTRIBUTES_FROM_RESULT:
218 if member in rv:
219 span.set_attribute(
220 f"elasticsearch.{member}",
221 str(rv[member]),
222 )
223
224 if callable(response_hook):
225 response_hook(span, rv)
226 return rv
227
228 return wrapper
229
```
--- END FILES ---
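To see where those URLs come from, a minimal end-to-end sketch; the cluster address and index names are placeholders, and the `search()` call below is what ultimately reaches `Transport.perform_request` and therefore the span name:

```python
import elasticsearch
from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor

# Instrument first so Transport.perform_request is wrapped.
ElasticsearchInstrumentor().instrument()

es = elasticsearch.Elasticsearch(hosts=["http://localhost:9200"])
# A rolling-index query like this produces the long "/idx1,idx2,.../_search" URL
# that the issue complains about.
es.search(
    index="bkfta_action_20220301_read,bkfta_action_20220302_read",
    body={"query": {"match_all": {}}},
)
```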
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py
@@ -150,11 +150,14 @@
_regex_doc_url = re.compile(r"/_doc/([^/]+)")
+# search api https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html
+_regex_search_url = re.compile(r"/([^/]+)/_search[/]?")
+
def _wrap_perform_request(
tracer, span_name_prefix, request_hook=None, response_hook=None
):
- # pylint: disable=R0912
+ # pylint: disable=R0912,R0914
def wrapper(wrapped, _, args, kwargs):
method = url = None
try:
@@ -167,7 +170,10 @@
)
op_name = span_name_prefix + (url or method or _DEFAULT_OP_NAME)
+
doc_id = None
+ search_target = None
+
if url:
# TODO: This regex-based solution avoids creating an unbounded number of span names, but should be replaced by instrumenting individual Elasticsearch methods instead of Transport.perform_request()
# A limitation of the regex is that only the '_doc' mapping type is supported. Mapping types are deprecated since Elasticsearch 7
@@ -184,6 +190,11 @@
)
# Put the document ID in attributes
doc_id = match.group(1)
+ match = _regex_search_url.search(url)
+ if match is not None:
+ op_name = span_name_prefix + "/<target>/_search"
+ search_target = match.group(1)
+
params = kwargs.get("params", {})
body = kwargs.get("body", None)
@@ -209,6 +220,8 @@
attributes["elasticsearch.params"] = str(params)
if doc_id:
attributes["elasticsearch.id"] = doc_id
+ if search_target:
+ attributes["elasticsearch.target"] = search_target
for key, value in attributes.items():
span.set_attribute(key, value)
| {"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py\n@@ -150,11 +150,14 @@\n \n _regex_doc_url = re.compile(r\"/_doc/([^/]+)\")\n \n+# search api https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html\n+_regex_search_url = re.compile(r\"/([^/]+)/_search[/]?\")\n+\n \n def _wrap_perform_request(\n tracer, span_name_prefix, request_hook=None, response_hook=None\n ):\n- # pylint: disable=R0912\n+ # pylint: disable=R0912,R0914\n def wrapper(wrapped, _, args, kwargs):\n method = url = None\n try:\n@@ -167,7 +170,10 @@\n )\n \n op_name = span_name_prefix + (url or method or _DEFAULT_OP_NAME)\n+\n doc_id = None\n+ search_target = None\n+\n if url:\n # TODO: This regex-based solution avoids creating an unbounded number of span names, but should be replaced by instrumenting individual Elasticsearch methods instead of Transport.perform_request()\n # A limitation of the regex is that only the '_doc' mapping type is supported. Mapping types are deprecated since Elasticsearch 7\n@@ -184,6 +190,11 @@\n )\n # Put the document ID in attributes\n doc_id = match.group(1)\n+ match = _regex_search_url.search(url)\n+ if match is not None:\n+ op_name = span_name_prefix + \"/<target>/_search\"\n+ search_target = match.group(1)\n+\n params = kwargs.get(\"params\", {})\n body = kwargs.get(\"body\", None)\n \n@@ -209,6 +220,8 @@\n attributes[\"elasticsearch.params\"] = str(params)\n if doc_id:\n attributes[\"elasticsearch.id\"] = doc_id\n+ if search_target:\n+ attributes[\"elasticsearch.target\"] = search_target\n for key, value in attributes.items():\n span.set_attribute(key, value)\n", "issue": "elasticsearch span_name not correctly\n\r\n**Steps to reproduce**\r\nDescribe exactly how to reproduce the error. Include a code sample if applicable.\r\n\r\n**What is the expected behavior?**\r\nOur index is created on a rolling basis\r\nTherefore, we will use a specific time to query the index when querying, and when the plugin generates span_name, it will bring the unified index, so that span_name cannot be described as an operation. 
As follows\r\n```\r\nElasticsearch/bkfta_action_20220228_read,bkfta_action_20220301_read,bkfta_action_20220302_read,bkfta_action_20220303_read,bkfta_action_20220304_read,bkfta_action_20220305_read,bkfta_action_20220306_read,bkfta_action_20220307_read/_search\r\n```\r\n**What is the actual behavior?**\r\nspan_name shuld be\r\n```\r\nElasticsearch/{index_name}/_search\r\n```\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis library allows tracing HTTP elasticsearch made by the\n`elasticsearch <https://elasticsearch-py.readthedocs.io/en/master/>`_ library.\n\nUsage\n-----\n\n.. code-block:: python\n\n from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor\n import elasticsearch\n\n\n # instrument elasticsearch\n ElasticsearchInstrumentor().instrument()\n\n # Using elasticsearch as normal now will automatically generate spans\n es = elasticsearch.Elasticsearch()\n es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})\n es.get(index='my-index', doc_type='my-type', id=1)\n\nElasticsearch instrumentation prefixes operation names with the string \"Elasticsearch\". This\ncan be changed to a different string by either setting the `OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX`\nenvironment variable or by passing the prefix as an argument to the instrumentor. For example,\n\n\n.. code-block:: python\n\n ElasticsearchInstrumentor(\"my-custom-prefix\").instrument()\n\n\nThe `instrument` method accepts the following keyword args:\n\ntracer_provider (TracerProvider) - an optional tracer provider\nrequest_hook (Callable) - a function with extra user-defined logic to be performed before performing the request\n this function signature is:\n def request_hook(span: Span, method: str, url: str, kwargs)\nresponse_hook (Callable) - a function with extra user-defined logic to be performed after performing the request\n this function signature is:\n def response_hook(span: Span, response: dict)\n\nfor example:\n\n.. 
code: python\n\n from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor\n import elasticsearch\n\n def request_hook(span, method, url, kwargs):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_request_hook\", \"some-value\")\n\n def response_hook(span, response):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_response_hook\", \"some-value\")\n\n # instrument elasticsearch with request and response hooks\n ElasticsearchInstrumentor().instrument(request_hook=request_hook, response_hook=response_hook)\n\n # Using elasticsearch as normal now will automatically generate spans,\n # including user custom attributes added from the hooks\n es = elasticsearch.Elasticsearch()\n es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})\n es.get(index='my-index', doc_type='my-type', id=1)\n\nAPI\n---\n\"\"\"\n\nimport re\nfrom logging import getLogger\nfrom os import environ\nfrom typing import Collection\n\nimport elasticsearch\nimport elasticsearch.exceptions\nfrom wrapt import wrap_function_wrapper as _wrap\n\nfrom opentelemetry.instrumentation.elasticsearch.package import _instruments\nfrom opentelemetry.instrumentation.elasticsearch.version import __version__\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.utils import unwrap\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace import SpanKind, get_tracer\n\nlogger = getLogger(__name__)\n\n\n# Values to add as tags from the actual\n# payload returned by Elasticsearch, if any.\n_ATTRIBUTES_FROM_RESULT = [\n \"found\",\n \"timed_out\",\n \"took\",\n]\n\n_DEFAULT_OP_NAME = \"request\"\n\n\nclass ElasticsearchInstrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for elasticsearch\n See `BaseInstrumentor`\n \"\"\"\n\n def __init__(self, span_name_prefix=None):\n if not span_name_prefix:\n span_name_prefix = environ.get(\n \"OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX\",\n \"Elasticsearch\",\n )\n self._span_name_prefix = span_name_prefix.strip()\n super().__init__()\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n \"\"\"\n Instruments elasticsarch module\n \"\"\"\n tracer_provider = kwargs.get(\"tracer_provider\")\n tracer = get_tracer(__name__, __version__, tracer_provider)\n request_hook = kwargs.get(\"request_hook\")\n response_hook = kwargs.get(\"response_hook\")\n _wrap(\n elasticsearch,\n \"Transport.perform_request\",\n _wrap_perform_request(\n tracer, self._span_name_prefix, request_hook, response_hook\n ),\n )\n\n def _uninstrument(self, **kwargs):\n unwrap(elasticsearch.Transport, \"perform_request\")\n\n\n_regex_doc_url = re.compile(r\"/_doc/([^/]+)\")\n\n\ndef _wrap_perform_request(\n tracer, span_name_prefix, request_hook=None, response_hook=None\n):\n # pylint: disable=R0912\n def wrapper(wrapped, _, args, kwargs):\n method = url = None\n try:\n method, url, *_ = args\n except IndexError:\n logger.warning(\n \"expected perform_request to receive two positional arguments. 
\"\n \"Got %d\",\n len(args),\n )\n\n op_name = span_name_prefix + (url or method or _DEFAULT_OP_NAME)\n doc_id = None\n if url:\n # TODO: This regex-based solution avoids creating an unbounded number of span names, but should be replaced by instrumenting individual Elasticsearch methods instead of Transport.perform_request()\n # A limitation of the regex is that only the '_doc' mapping type is supported. Mapping types are deprecated since Elasticsearch 7\n # https://github.com/open-telemetry/opentelemetry-python-contrib/issues/708\n match = _regex_doc_url.search(url)\n if match is not None:\n # Remove the full document ID from the URL\n doc_span = match.span()\n op_name = (\n span_name_prefix\n + url[: doc_span[0]]\n + \"/_doc/:id\"\n + url[doc_span[1] :]\n )\n # Put the document ID in attributes\n doc_id = match.group(1)\n params = kwargs.get(\"params\", {})\n body = kwargs.get(\"body\", None)\n\n with tracer.start_as_current_span(\n op_name,\n kind=SpanKind.CLIENT,\n ) as span:\n\n if callable(request_hook):\n request_hook(span, method, url, kwargs)\n\n if span.is_recording():\n attributes = {\n SpanAttributes.DB_SYSTEM: \"elasticsearch\",\n }\n if url:\n attributes[\"elasticsearch.url\"] = url\n if method:\n attributes[\"elasticsearch.method\"] = method\n if body:\n attributes[SpanAttributes.DB_STATEMENT] = str(body)\n if params:\n attributes[\"elasticsearch.params\"] = str(params)\n if doc_id:\n attributes[\"elasticsearch.id\"] = doc_id\n for key, value in attributes.items():\n span.set_attribute(key, value)\n\n rv = wrapped(*args, **kwargs)\n if isinstance(rv, dict) and span.is_recording():\n for member in _ATTRIBUTES_FROM_RESULT:\n if member in rv:\n span.set_attribute(\n f\"elasticsearch.{member}\",\n str(rv[member]),\n )\n\n if callable(response_hook):\n response_hook(span, rv)\n return rv\n\n return wrapper\n", "path": "instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis library allows tracing HTTP elasticsearch made by the\n`elasticsearch <https://elasticsearch-py.readthedocs.io/en/master/>`_ library.\n\nUsage\n-----\n\n.. code-block:: python\n\n from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor\n import elasticsearch\n\n\n # instrument elasticsearch\n ElasticsearchInstrumentor().instrument()\n\n # Using elasticsearch as normal now will automatically generate spans\n es = elasticsearch.Elasticsearch()\n es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})\n es.get(index='my-index', doc_type='my-type', id=1)\n\nElasticsearch instrumentation prefixes operation names with the string \"Elasticsearch\". This\ncan be changed to a different string by either setting the `OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX`\nenvironment variable or by passing the prefix as an argument to the instrumentor. 
For example,\n\n\n.. code-block:: python\n\n ElasticsearchInstrumentor(\"my-custom-prefix\").instrument()\n\n\nThe `instrument` method accepts the following keyword args:\n\ntracer_provider (TracerProvider) - an optional tracer provider\nrequest_hook (Callable) - a function with extra user-defined logic to be performed before performing the request\n this function signature is:\n def request_hook(span: Span, method: str, url: str, kwargs)\nresponse_hook (Callable) - a function with extra user-defined logic to be performed after performing the request\n this function signature is:\n def response_hook(span: Span, response: dict)\n\nfor example:\n\n.. code: python\n\n from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor\n import elasticsearch\n\n def request_hook(span, method, url, kwargs):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_request_hook\", \"some-value\")\n\n def response_hook(span, response):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_response_hook\", \"some-value\")\n\n # instrument elasticsearch with request and response hooks\n ElasticsearchInstrumentor().instrument(request_hook=request_hook, response_hook=response_hook)\n\n # Using elasticsearch as normal now will automatically generate spans,\n # including user custom attributes added from the hooks\n es = elasticsearch.Elasticsearch()\n es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})\n es.get(index='my-index', doc_type='my-type', id=1)\n\nAPI\n---\n\"\"\"\n\nimport re\nfrom logging import getLogger\nfrom os import environ\nfrom typing import Collection\n\nimport elasticsearch\nimport elasticsearch.exceptions\nfrom wrapt import wrap_function_wrapper as _wrap\n\nfrom opentelemetry.instrumentation.elasticsearch.package import _instruments\nfrom opentelemetry.instrumentation.elasticsearch.version import __version__\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.utils import unwrap\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace import SpanKind, get_tracer\n\nlogger = getLogger(__name__)\n\n\n# Values to add as tags from the actual\n# payload returned by Elasticsearch, if any.\n_ATTRIBUTES_FROM_RESULT = [\n \"found\",\n \"timed_out\",\n \"took\",\n]\n\n_DEFAULT_OP_NAME = \"request\"\n\n\nclass ElasticsearchInstrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for elasticsearch\n See `BaseInstrumentor`\n \"\"\"\n\n def __init__(self, span_name_prefix=None):\n if not span_name_prefix:\n span_name_prefix = environ.get(\n \"OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX\",\n \"Elasticsearch\",\n )\n self._span_name_prefix = span_name_prefix.strip()\n super().__init__()\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n \"\"\"\n Instruments elasticsarch module\n \"\"\"\n tracer_provider = kwargs.get(\"tracer_provider\")\n tracer = get_tracer(__name__, __version__, tracer_provider)\n request_hook = kwargs.get(\"request_hook\")\n response_hook = kwargs.get(\"response_hook\")\n _wrap(\n elasticsearch,\n \"Transport.perform_request\",\n _wrap_perform_request(\n tracer, self._span_name_prefix, request_hook, response_hook\n ),\n )\n\n def _uninstrument(self, **kwargs):\n unwrap(elasticsearch.Transport, \"perform_request\")\n\n\n_regex_doc_url = re.compile(r\"/_doc/([^/]+)\")\n\n# search api 
https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html\n_regex_search_url = re.compile(r\"/([^/]+)/_search[/]?\")\n\n\ndef _wrap_perform_request(\n tracer, span_name_prefix, request_hook=None, response_hook=None\n):\n # pylint: disable=R0912,R0914\n def wrapper(wrapped, _, args, kwargs):\n method = url = None\n try:\n method, url, *_ = args\n except IndexError:\n logger.warning(\n \"expected perform_request to receive two positional arguments. \"\n \"Got %d\",\n len(args),\n )\n\n op_name = span_name_prefix + (url or method or _DEFAULT_OP_NAME)\n\n doc_id = None\n search_target = None\n\n if url:\n # TODO: This regex-based solution avoids creating an unbounded number of span names, but should be replaced by instrumenting individual Elasticsearch methods instead of Transport.perform_request()\n # A limitation of the regex is that only the '_doc' mapping type is supported. Mapping types are deprecated since Elasticsearch 7\n # https://github.com/open-telemetry/opentelemetry-python-contrib/issues/708\n match = _regex_doc_url.search(url)\n if match is not None:\n # Remove the full document ID from the URL\n doc_span = match.span()\n op_name = (\n span_name_prefix\n + url[: doc_span[0]]\n + \"/_doc/:id\"\n + url[doc_span[1] :]\n )\n # Put the document ID in attributes\n doc_id = match.group(1)\n match = _regex_search_url.search(url)\n if match is not None:\n op_name = span_name_prefix + \"/<target>/_search\"\n search_target = match.group(1)\n\n params = kwargs.get(\"params\", {})\n body = kwargs.get(\"body\", None)\n\n with tracer.start_as_current_span(\n op_name,\n kind=SpanKind.CLIENT,\n ) as span:\n\n if callable(request_hook):\n request_hook(span, method, url, kwargs)\n\n if span.is_recording():\n attributes = {\n SpanAttributes.DB_SYSTEM: \"elasticsearch\",\n }\n if url:\n attributes[\"elasticsearch.url\"] = url\n if method:\n attributes[\"elasticsearch.method\"] = method\n if body:\n attributes[SpanAttributes.DB_STATEMENT] = str(body)\n if params:\n attributes[\"elasticsearch.params\"] = str(params)\n if doc_id:\n attributes[\"elasticsearch.id\"] = doc_id\n if search_target:\n attributes[\"elasticsearch.target\"] = search_target\n for key, value in attributes.items():\n span.set_attribute(key, value)\n\n rv = wrapped(*args, **kwargs)\n if isinstance(rv, dict) and span.is_recording():\n for member in _ATTRIBUTES_FROM_RESULT:\n if member in rv:\n span.set_attribute(\n f\"elasticsearch.{member}\",\n str(rv[member]),\n )\n\n if callable(response_hook):\n response_hook(span, rv)\n return rv\n\n return wrapper\n", "path": "instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py"}]} | 2,794 | 548 |
gh_patches_debug_11304 | rasdani/github-patches | git_diff | comic__grand-challenge.org-1450 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add example code for overlay segment configuration for workstation
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/workstation_configs/forms.py`
Content:
```
1 from django.forms import ModelForm
2
3 from grandchallenge.core.forms import SaveFormInitMixin
4 from grandchallenge.core.widgets import JSONEditorWidget
5 from grandchallenge.workstation_configs.models import (
6 OVERLAY_SEGMENTS_SCHEMA,
7 WorkstationConfig,
8 )
9
10
11 class WorkstationConfigForm(SaveFormInitMixin, ModelForm):
12 class Meta:
13 model = WorkstationConfig
14 fields = (
15 "title",
16 "description",
17 "window_presets",
18 "default_window_preset",
19 "default_slab_thickness_mm",
20 "default_slab_render_method",
21 "default_orientation",
22 "default_overlay_alpha",
23 "default_overlay_lut",
24 "default_overlay_interpolation",
25 "overlay_segments",
26 "default_zoom_scale",
27 "show_image_info_plugin",
28 "show_display_plugin",
29 "show_invert_tool",
30 "show_flip_tool",
31 "show_window_level_tool",
32 "show_reset_tool",
33 )
34 widgets = {
35 "overlay_segments": JSONEditorWidget(
36 schema=OVERLAY_SEGMENTS_SCHEMA
37 ),
38 }
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/grandchallenge/workstation_configs/forms.py b/app/grandchallenge/workstation_configs/forms.py
--- a/app/grandchallenge/workstation_configs/forms.py
+++ b/app/grandchallenge/workstation_configs/forms.py
@@ -36,3 +36,14 @@
schema=OVERLAY_SEGMENTS_SCHEMA
),
}
+ help_texts = {
+ "overlay_segments": (
+ "If an categorical overlay is shown, it is possible to show toggles "
+ "to change the visibility of the different overlay categories. To do "
+ "so, configure the categories that should be displayed. Data from the"
+ " algorithm's output.json can be added as an extra label to each "
+ "toggle using jinja templating. "
+ 'For example: [{ "voxel_value": 0, "name": "Level 0", "visible": '
+ 'false, "metric_template": "{{metrics.volumes[0]}} mm³"},]'
+ ),
+ }
| {"golden_diff": "diff --git a/app/grandchallenge/workstation_configs/forms.py b/app/grandchallenge/workstation_configs/forms.py\n--- a/app/grandchallenge/workstation_configs/forms.py\n+++ b/app/grandchallenge/workstation_configs/forms.py\n@@ -36,3 +36,14 @@\n schema=OVERLAY_SEGMENTS_SCHEMA\n ),\n }\n+ help_texts = {\n+ \"overlay_segments\": (\n+ \"If an categorical overlay is shown, it is possible to show toggles \"\n+ \"to change the visibility of the different overlay categories. To do \"\n+ \"so, configure the categories that should be displayed. Data from the\"\n+ \" algorithm's output.json can be added as an extra label to each \"\n+ \"toggle using jinja templating. \"\n+ 'For example: [{ \"voxel_value\": 0, \"name\": \"Level 0\", \"visible\": '\n+ 'false, \"metric_template\": \"{{metrics.volumes[0]}} mm\u00b3\"},]'\n+ ),\n+ }\n", "issue": "Add example code for overlay segment configuration for workstation\n\n", "before_files": [{"content": "from django.forms import ModelForm\n\nfrom grandchallenge.core.forms import SaveFormInitMixin\nfrom grandchallenge.core.widgets import JSONEditorWidget\nfrom grandchallenge.workstation_configs.models import (\n OVERLAY_SEGMENTS_SCHEMA,\n WorkstationConfig,\n)\n\n\nclass WorkstationConfigForm(SaveFormInitMixin, ModelForm):\n class Meta:\n model = WorkstationConfig\n fields = (\n \"title\",\n \"description\",\n \"window_presets\",\n \"default_window_preset\",\n \"default_slab_thickness_mm\",\n \"default_slab_render_method\",\n \"default_orientation\",\n \"default_overlay_alpha\",\n \"default_overlay_lut\",\n \"default_overlay_interpolation\",\n \"overlay_segments\",\n \"default_zoom_scale\",\n \"show_image_info_plugin\",\n \"show_display_plugin\",\n \"show_invert_tool\",\n \"show_flip_tool\",\n \"show_window_level_tool\",\n \"show_reset_tool\",\n )\n widgets = {\n \"overlay_segments\": JSONEditorWidget(\n schema=OVERLAY_SEGMENTS_SCHEMA\n ),\n }\n", "path": "app/grandchallenge/workstation_configs/forms.py"}], "after_files": [{"content": "from django.forms import ModelForm\n\nfrom grandchallenge.core.forms import SaveFormInitMixin\nfrom grandchallenge.core.widgets import JSONEditorWidget\nfrom grandchallenge.workstation_configs.models import (\n OVERLAY_SEGMENTS_SCHEMA,\n WorkstationConfig,\n)\n\n\nclass WorkstationConfigForm(SaveFormInitMixin, ModelForm):\n class Meta:\n model = WorkstationConfig\n fields = (\n \"title\",\n \"description\",\n \"window_presets\",\n \"default_window_preset\",\n \"default_slab_thickness_mm\",\n \"default_slab_render_method\",\n \"default_orientation\",\n \"default_overlay_alpha\",\n \"default_overlay_lut\",\n \"default_overlay_interpolation\",\n \"overlay_segments\",\n \"default_zoom_scale\",\n \"show_image_info_plugin\",\n \"show_display_plugin\",\n \"show_invert_tool\",\n \"show_flip_tool\",\n \"show_window_level_tool\",\n \"show_reset_tool\",\n )\n widgets = {\n \"overlay_segments\": JSONEditorWidget(\n schema=OVERLAY_SEGMENTS_SCHEMA\n ),\n }\n help_texts = {\n \"overlay_segments\": (\n \"If an categorical overlay is shown, it is possible to show toggles \"\n \"to change the visibility of the different overlay categories. To do \"\n \"so, configure the categories that should be displayed. Data from the\"\n \" algorithm's output.json can be added as an extra label to each \"\n \"toggle using jinja templating. 
\"\n 'For example: [{ \"voxel_value\": 0, \"name\": \"Level 0\", \"visible\": '\n 'false, \"metric_template\": \"{{metrics.volumes[0]}} mm\u00b3\"},]'\n ),\n }\n", "path": "app/grandchallenge/workstation_configs/forms.py"}]} | 564 | 220 |
gh_patches_debug_15232 | rasdani/github-patches | git_diff | pre-commit__pre-commit-914 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
C:\python3\python.exe does not exist while running pre-commit on Windows
Hi,
I'm trying to run the pre-commit commands of the [ESSS/barril](https://github.com/ESSS/barril) repository on my Windows machine, but I'm getting this error:
```
λ tox -e linting
linting installed: aspy.yaml==1.1.1,cfgv==1.4.0,identify==1.1.8,importlib-metadata==0.8,importlib-resources==1.0.2,nodeenv==1.3.3,pre-commit==1.14.0,PyYAML==3.13,six==1.12.0,toml==0.10.0,virtualenv==16.2.0,zipp==0.3.3
linting run-test-pre: PYTHONHASHSEED='554'
linting runtests: commands[0] | pre-commit run --all-files --show-diff-on-failure
[INFO] Installing environment for https://github.com/ambv/black.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
An unexpected error has occurred: CalledProcessError: Command: ('w:\\projects\\barril\\.tox\\linting\\scripts\\python.exe', '-mvirtualenv', 'C:\\Users\\darci\\.cache\\pre-commit\\repoufam_s8_\\py_env-python3', '-p', 'C:\\python3\\python.exe')
Return code: 3
Expected return code: 0
Output:
The path C:\python3\python.exe (from --python=C:\python3\python.exe) does not exist
Errors: (none)
Check the log at C:\Users\darci/.cache\pre-commit\pre-commit.log
```
The `C:\python3` path does not exist on my machine, I'm using `conda` and `virtualenv` to create a Python 3.6 virtual environment, from which I execute `tox -e linting`.
Here's the contents of the `pre-commit-config.yaml`:
```yaml
repos:
- repo: https://github.com/ambv/black
rev: 18.6b4
hooks:
- id: black
args: [--safe, --quiet]
language_version: python3
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v1.3.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
- id: debug-statements
- id: flake8
- repo: local
hooks:
- id: rst
name: rst
entry: rst-lint --encoding utf-8
files: ^(CHANGELOG.rst|HOWTORELEASE.rst|README.rst)$
language: python
additional_dependencies: [pygments, restructuredtext_lint]
```
Any hints of what the problem might be?
cc @nicoddemus
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/languages/python.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import contextlib
4 import os
5 import sys
6
7 import pre_commit.constants as C
8 from pre_commit.envcontext import envcontext
9 from pre_commit.envcontext import UNSET
10 from pre_commit.envcontext import Var
11 from pre_commit.languages import helpers
12 from pre_commit.parse_shebang import find_executable
13 from pre_commit.util import CalledProcessError
14 from pre_commit.util import clean_path_on_failure
15 from pre_commit.util import cmd_output
16
17
18 ENVIRONMENT_DIR = 'py_env'
19
20
21 def bin_dir(venv):
22 """On windows there's a different directory for the virtualenv"""
23 bin_part = 'Scripts' if os.name == 'nt' else 'bin'
24 return os.path.join(venv, bin_part)
25
26
27 def get_env_patch(venv):
28 return (
29 ('PYTHONHOME', UNSET),
30 ('VIRTUAL_ENV', venv),
31 ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),
32 )
33
34
35 def _find_by_py_launcher(version): # pragma: no cover (windows only)
36 if version.startswith('python'):
37 try:
38 return cmd_output(
39 'py', '-{}'.format(version[len('python'):]),
40 '-c', 'import sys; print(sys.executable)',
41 )[1].strip()
42 except CalledProcessError:
43 pass
44
45
46 def _get_default_version(): # pragma: no cover (platform dependent)
47 def _norm(path):
48 _, exe = os.path.split(path.lower())
49 exe, _, _ = exe.partition('.exe')
50 if find_executable(exe) and exe not in {'python', 'pythonw'}:
51 return exe
52
53 # First attempt from `sys.executable` (or the realpath)
54 # On linux, I see these common sys.executables:
55 #
56 # system `python`: /usr/bin/python -> python2.7
57 # system `python2`: /usr/bin/python2 -> python2.7
58 # virtualenv v: v/bin/python (will not return from this loop)
59 # virtualenv v -ppython2: v/bin/python -> python2
60 # virtualenv v -ppython2.7: v/bin/python -> python2.7
61 # virtualenv v -ppypy: v/bin/python -> v/bin/pypy
62 for path in {sys.executable, os.path.realpath(sys.executable)}:
63 exe = _norm(path)
64 if exe:
65 return exe
66
67 # Next try the `pythonX.X` executable
68 exe = 'python{}.{}'.format(*sys.version_info)
69 if find_executable(exe):
70 return exe
71
72 if _find_by_py_launcher(exe):
73 return exe
74
75 # Give a best-effort try for windows
76 if os.path.exists(r'C:\{}\python.exe'.format(exe.replace('.', ''))):
77 return exe
78
79 # We tried!
80 return C.DEFAULT
81
82
83 def get_default_version():
84 # TODO: when dropping python2, use `functools.lru_cache(maxsize=1)`
85 try:
86 return get_default_version.cached_version
87 except AttributeError:
88 get_default_version.cached_version = _get_default_version()
89 return get_default_version()
90
91
92 def norm_version(version):
93 if os.name == 'nt': # pragma: no cover (windows)
94 # Try looking up by name
95 version_exec = find_executable(version)
96 if version_exec and version_exec != version:
97 return version_exec
98
99 version_exec = _find_by_py_launcher(version)
100 if version_exec:
101 return version_exec
102
103 # If it is in the form pythonx.x search in the default
104 # place on windows
105 if version.startswith('python'):
106 return r'C:\{}\python.exe'.format(version.replace('.', ''))
107
108 # Otherwise assume it is a path
109 return os.path.expanduser(version)
110
111
112 def py_interface(_dir, _make_venv):
113 @contextlib.contextmanager
114 def in_env(prefix, language_version):
115 envdir = prefix.path(helpers.environment_dir(_dir, language_version))
116 with envcontext(get_env_patch(envdir)):
117 yield
118
119 def healthy(prefix, language_version):
120 with in_env(prefix, language_version):
121 retcode, _, _ = cmd_output(
122 'python', '-c',
123 'import ctypes, datetime, io, os, ssl, weakref',
124 retcode=None,
125 )
126 return retcode == 0
127
128 def run_hook(hook, file_args):
129 with in_env(hook.prefix, hook.language_version):
130 return helpers.run_xargs(hook, helpers.to_cmd(hook), file_args)
131
132 def install_environment(prefix, version, additional_dependencies):
133 additional_dependencies = tuple(additional_dependencies)
134 directory = helpers.environment_dir(_dir, version)
135
136 env_dir = prefix.path(directory)
137 with clean_path_on_failure(env_dir):
138 if version != C.DEFAULT:
139 python = norm_version(version)
140 else:
141 python = os.path.realpath(sys.executable)
142 _make_venv(env_dir, python)
143 with in_env(prefix, version):
144 helpers.run_setup_cmd(
145 prefix, ('pip', 'install', '.') + additional_dependencies,
146 )
147
148 return in_env, healthy, run_hook, install_environment
149
150
151 def make_venv(envdir, python):
152 env = dict(os.environ, VIRTUALENV_NO_DOWNLOAD='1')
153 cmd = (sys.executable, '-mvirtualenv', envdir, '-p', python)
154 cmd_output(*cmd, env=env, cwd='/')
155
156
157 _interface = py_interface(ENVIRONMENT_DIR, make_venv)
158 in_env, healthy, run_hook, install_environment = _interface
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py
--- a/pre_commit/languages/python.py
+++ b/pre_commit/languages/python.py
@@ -89,8 +89,26 @@
return get_default_version()
+def _sys_executable_matches(version):
+ if version == 'python':
+ return True
+ elif not version.startswith('python'):
+ return False
+
+ try:
+ info = tuple(int(p) for p in version[len('python'):].split('.'))
+ except ValueError:
+ return False
+
+ return sys.version_info[:len(info)] == info
+
+
def norm_version(version):
if os.name == 'nt': # pragma: no cover (windows)
+ # first see if our current executable is appropriate
+ if _sys_executable_matches(version):
+ return sys.executable
+
# Try looking up by name
version_exec = find_executable(version)
if version_exec and version_exec != version:
| {"golden_diff": "diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py\n--- a/pre_commit/languages/python.py\n+++ b/pre_commit/languages/python.py\n@@ -89,8 +89,26 @@\n return get_default_version()\n \n \n+def _sys_executable_matches(version):\n+ if version == 'python':\n+ return True\n+ elif not version.startswith('python'):\n+ return False\n+\n+ try:\n+ info = tuple(int(p) for p in version[len('python'):].split('.'))\n+ except ValueError:\n+ return False\n+\n+ return sys.version_info[:len(info)] == info\n+\n+\n def norm_version(version):\n if os.name == 'nt': # pragma: no cover (windows)\n+ # first see if our current executable is appropriate\n+ if _sys_executable_matches(version):\n+ return sys.executable\n+\n # Try looking up by name\n version_exec = find_executable(version)\n if version_exec and version_exec != version:\n", "issue": "C:\\python3\\python.exe does not exist while running pre-commit on Windows\nHi,\r\n\r\nI'm trying to run the pre-commit commands of the [ESSS/barril](https://github.com/ESSS/barril) repository on my Windows machine, but I'm getting this error:\r\n\r\n```\r\n \u03bb tox -e linting\r\nlinting installed: aspy.yaml==1.1.1,cfgv==1.4.0,identify==1.1.8,importlib-metadata==0.8,importlib-resources==1.0.2,nodeenv==1.3.3,pre-commit==1.14.0,PyYAML==3.13,six==1.12.0,toml==0.10.0,virtualenv==16.2.0,zipp==0.3.3\r\nlinting run-test-pre: PYTHONHASHSEED='554'\r\nlinting runtests: commands[0] | pre-commit run --all-files --show-diff-on-failure\r\n[INFO] Installing environment for https://github.com/ambv/black.\r\n[INFO] Once installed this environment will be reused.\r\n[INFO] This may take a few minutes...\r\nAn unexpected error has occurred: CalledProcessError: Command: ('w:\\\\projects\\\\barril\\\\.tox\\\\linting\\\\scripts\\\\python.exe', '-mvirtualenv', 'C:\\\\Users\\\\darci\\\\.cache\\\\pre-commit\\\\repoufam_s8_\\\\py_env-python3', '-p', 'C:\\\\python3\\\\python.exe')\r\nReturn code: 3\r\nExpected return code: 0\r\nOutput:\r\n The path C:\\python3\\python.exe (from --python=C:\\python3\\python.exe) does not exist\r\n\r\nErrors: (none)\r\n\r\nCheck the log at C:\\Users\\darci/.cache\\pre-commit\\pre-commit.log\r\n```\r\n\r\nThe `C:\\python3` path does not exist on my machine, I'm using `conda` and `virtualenv` to create a Python 3.6 virtual environment, from which I execute `tox -e linting`.\r\n\r\nHere's the contents of the `pre-commit-config.yaml`:\r\n\r\n```yaml\r\nrepos:\r\n- repo: https://github.com/ambv/black\r\n rev: 18.6b4\r\n hooks:\r\n - id: black\r\n args: [--safe, --quiet]\r\n language_version: python3\r\n- repo: https://github.com/pre-commit/pre-commit-hooks\r\n rev: v1.3.0\r\n hooks:\r\n - id: trailing-whitespace\r\n - id: end-of-file-fixer\r\n - id: check-yaml\r\n - id: debug-statements\r\n - id: flake8\r\n- repo: local\r\n hooks:\r\n - id: rst\r\n name: rst\r\n entry: rst-lint --encoding utf-8\r\n files: ^(CHANGELOG.rst|HOWTORELEASE.rst|README.rst)$\r\n language: python\r\n additional_dependencies: [pygments, restructuredtext_lint]\r\n```\r\n\r\nAny hints of what the problem might be?\r\n\r\ncc @nicoddemus \n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport os\nimport sys\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.languages import helpers\nfrom pre_commit.parse_shebang import find_executable\nfrom pre_commit.util import 
CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\n\n\nENVIRONMENT_DIR = 'py_env'\n\n\ndef bin_dir(venv):\n \"\"\"On windows there's a different directory for the virtualenv\"\"\"\n bin_part = 'Scripts' if os.name == 'nt' else 'bin'\n return os.path.join(venv, bin_part)\n\n\ndef get_env_patch(venv):\n return (\n ('PYTHONHOME', UNSET),\n ('VIRTUAL_ENV', venv),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\ndef _find_by_py_launcher(version): # pragma: no cover (windows only)\n if version.startswith('python'):\n try:\n return cmd_output(\n 'py', '-{}'.format(version[len('python'):]),\n '-c', 'import sys; print(sys.executable)',\n )[1].strip()\n except CalledProcessError:\n pass\n\n\ndef _get_default_version(): # pragma: no cover (platform dependent)\n def _norm(path):\n _, exe = os.path.split(path.lower())\n exe, _, _ = exe.partition('.exe')\n if find_executable(exe) and exe not in {'python', 'pythonw'}:\n return exe\n\n # First attempt from `sys.executable` (or the realpath)\n # On linux, I see these common sys.executables:\n #\n # system `python`: /usr/bin/python -> python2.7\n # system `python2`: /usr/bin/python2 -> python2.7\n # virtualenv v: v/bin/python (will not return from this loop)\n # virtualenv v -ppython2: v/bin/python -> python2\n # virtualenv v -ppython2.7: v/bin/python -> python2.7\n # virtualenv v -ppypy: v/bin/python -> v/bin/pypy\n for path in {sys.executable, os.path.realpath(sys.executable)}:\n exe = _norm(path)\n if exe:\n return exe\n\n # Next try the `pythonX.X` executable\n exe = 'python{}.{}'.format(*sys.version_info)\n if find_executable(exe):\n return exe\n\n if _find_by_py_launcher(exe):\n return exe\n\n # Give a best-effort try for windows\n if os.path.exists(r'C:\\{}\\python.exe'.format(exe.replace('.', ''))):\n return exe\n\n # We tried!\n return C.DEFAULT\n\n\ndef get_default_version():\n # TODO: when dropping python2, use `functools.lru_cache(maxsize=1)`\n try:\n return get_default_version.cached_version\n except AttributeError:\n get_default_version.cached_version = _get_default_version()\n return get_default_version()\n\n\ndef norm_version(version):\n if os.name == 'nt': # pragma: no cover (windows)\n # Try looking up by name\n version_exec = find_executable(version)\n if version_exec and version_exec != version:\n return version_exec\n\n version_exec = _find_by_py_launcher(version)\n if version_exec:\n return version_exec\n\n # If it is in the form pythonx.x search in the default\n # place on windows\n if version.startswith('python'):\n return r'C:\\{}\\python.exe'.format(version.replace('.', ''))\n\n # Otherwise assume it is a path\n return os.path.expanduser(version)\n\n\ndef py_interface(_dir, _make_venv):\n @contextlib.contextmanager\n def in_env(prefix, language_version):\n envdir = prefix.path(helpers.environment_dir(_dir, language_version))\n with envcontext(get_env_patch(envdir)):\n yield\n\n def healthy(prefix, language_version):\n with in_env(prefix, language_version):\n retcode, _, _ = cmd_output(\n 'python', '-c',\n 'import ctypes, datetime, io, os, ssl, weakref',\n retcode=None,\n )\n return retcode == 0\n\n def run_hook(hook, file_args):\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, helpers.to_cmd(hook), file_args)\n\n def install_environment(prefix, version, additional_dependencies):\n additional_dependencies = tuple(additional_dependencies)\n directory = helpers.environment_dir(_dir, version)\n\n env_dir = 
prefix.path(directory)\n with clean_path_on_failure(env_dir):\n if version != C.DEFAULT:\n python = norm_version(version)\n else:\n python = os.path.realpath(sys.executable)\n _make_venv(env_dir, python)\n with in_env(prefix, version):\n helpers.run_setup_cmd(\n prefix, ('pip', 'install', '.') + additional_dependencies,\n )\n\n return in_env, healthy, run_hook, install_environment\n\n\ndef make_venv(envdir, python):\n env = dict(os.environ, VIRTUALENV_NO_DOWNLOAD='1')\n cmd = (sys.executable, '-mvirtualenv', envdir, '-p', python)\n cmd_output(*cmd, env=env, cwd='/')\n\n\n_interface = py_interface(ENVIRONMENT_DIR, make_venv)\nin_env, healthy, run_hook, install_environment = _interface\n", "path": "pre_commit/languages/python.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport os\nimport sys\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.languages import helpers\nfrom pre_commit.parse_shebang import find_executable\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\n\n\nENVIRONMENT_DIR = 'py_env'\n\n\ndef bin_dir(venv):\n \"\"\"On windows there's a different directory for the virtualenv\"\"\"\n bin_part = 'Scripts' if os.name == 'nt' else 'bin'\n return os.path.join(venv, bin_part)\n\n\ndef get_env_patch(venv):\n return (\n ('PYTHONHOME', UNSET),\n ('VIRTUAL_ENV', venv),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\ndef _find_by_py_launcher(version): # pragma: no cover (windows only)\n if version.startswith('python'):\n try:\n return cmd_output(\n 'py', '-{}'.format(version[len('python'):]),\n '-c', 'import sys; print(sys.executable)',\n )[1].strip()\n except CalledProcessError:\n pass\n\n\ndef _get_default_version(): # pragma: no cover (platform dependent)\n def _norm(path):\n _, exe = os.path.split(path.lower())\n exe, _, _ = exe.partition('.exe')\n if find_executable(exe) and exe not in {'python', 'pythonw'}:\n return exe\n\n # First attempt from `sys.executable` (or the realpath)\n # On linux, I see these common sys.executables:\n #\n # system `python`: /usr/bin/python -> python2.7\n # system `python2`: /usr/bin/python2 -> python2.7\n # virtualenv v: v/bin/python (will not return from this loop)\n # virtualenv v -ppython2: v/bin/python -> python2\n # virtualenv v -ppython2.7: v/bin/python -> python2.7\n # virtualenv v -ppypy: v/bin/python -> v/bin/pypy\n for path in {sys.executable, os.path.realpath(sys.executable)}:\n exe = _norm(path)\n if exe:\n return exe\n\n # Next try the `pythonX.X` executable\n exe = 'python{}.{}'.format(*sys.version_info)\n if find_executable(exe):\n return exe\n\n if _find_by_py_launcher(exe):\n return exe\n\n # Give a best-effort try for windows\n if os.path.exists(r'C:\\{}\\python.exe'.format(exe.replace('.', ''))):\n return exe\n\n # We tried!\n return C.DEFAULT\n\n\ndef get_default_version():\n # TODO: when dropping python2, use `functools.lru_cache(maxsize=1)`\n try:\n return get_default_version.cached_version\n except AttributeError:\n get_default_version.cached_version = _get_default_version()\n return get_default_version()\n\n\ndef _sys_executable_matches(version):\n if version == 'python':\n return True\n elif not version.startswith('python'):\n return False\n\n try:\n info = tuple(int(p) for p in version[len('python'):].split('.'))\n except ValueError:\n return 
False\n\n return sys.version_info[:len(info)] == info\n\n\ndef norm_version(version):\n if os.name == 'nt': # pragma: no cover (windows)\n # first see if our current executable is appropriate\n if _sys_executable_matches(version):\n return sys.executable\n\n # Try looking up by name\n version_exec = find_executable(version)\n if version_exec and version_exec != version:\n return version_exec\n\n version_exec = _find_by_py_launcher(version)\n if version_exec:\n return version_exec\n\n # If it is in the form pythonx.x search in the default\n # place on windows\n if version.startswith('python'):\n return r'C:\\{}\\python.exe'.format(version.replace('.', ''))\n\n # Otherwise assume it is a path\n return os.path.expanduser(version)\n\n\ndef py_interface(_dir, _make_venv):\n @contextlib.contextmanager\n def in_env(prefix, language_version):\n envdir = prefix.path(helpers.environment_dir(_dir, language_version))\n with envcontext(get_env_patch(envdir)):\n yield\n\n def healthy(prefix, language_version):\n with in_env(prefix, language_version):\n retcode, _, _ = cmd_output(\n 'python', '-c',\n 'import ctypes, datetime, io, os, ssl, weakref',\n retcode=None,\n )\n return retcode == 0\n\n def run_hook(hook, file_args):\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, helpers.to_cmd(hook), file_args)\n\n def install_environment(prefix, version, additional_dependencies):\n additional_dependencies = tuple(additional_dependencies)\n directory = helpers.environment_dir(_dir, version)\n\n env_dir = prefix.path(directory)\n with clean_path_on_failure(env_dir):\n if version != C.DEFAULT:\n python = norm_version(version)\n else:\n python = os.path.realpath(sys.executable)\n _make_venv(env_dir, python)\n with in_env(prefix, version):\n helpers.run_setup_cmd(\n prefix, ('pip', 'install', '.') + additional_dependencies,\n )\n\n return in_env, healthy, run_hook, install_environment\n\n\ndef make_venv(envdir, python):\n env = dict(os.environ, VIRTUALENV_NO_DOWNLOAD='1')\n cmd = (sys.executable, '-mvirtualenv', envdir, '-p', python)\n cmd_output(*cmd, env=env, cwd='/')\n\n\n_interface = py_interface(ENVIRONMENT_DIR, make_venv)\nin_env, healthy, run_hook, install_environment = _interface\n", "path": "pre_commit/languages/python.py"}]} | 2,538 | 222 |
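The essence of the golden diff above is the `_sys_executable_matches` helper: when the requested `pythonX.Y` matches the interpreter pre-commit is already running under, reuse `sys.executable` instead of probing the `py` launcher or the hard-coded `C:\python36`-style path. Below is a standalone restatement of that check so the matching rule can be tried outside pre-commit; the printed expectations assume a CPython 3.x interpreter.

```python
import sys


def sys_executable_matches(version: str) -> bool:
    """Return True when 'python', 'python3', 'python3.6', ... matches this interpreter."""
    if version == 'python':
        return True
    if not version.startswith('python'):
        return False
    try:
        info = tuple(int(p) for p in version[len('python'):].split('.'))
    except ValueError:
        return False
    # Compare only as many version components as the request specifies.
    return sys.version_info[:len(info)] == info


current = 'python{}.{}'.format(*sys.version_info[:2])
print(sys_executable_matches('python'))      # True
print(sys_executable_matches(current))       # True: safe to reuse sys.executable
print(sys_executable_matches('python2.7'))   # False on any Python 3 interpreter
```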
gh_patches_debug_25903 | rasdani/github-patches | git_diff | docarray__docarray-296 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Expose Timeout Parameters in weaviate
Weaviate storage currently does not allow specifying the Timeout parameters.
Add the parameter to `WeaviateConfig` and use it when creating the weaviate client.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docarray/array/storage/weaviate/backend.py`
Content:
```
1 import uuid
2 from dataclasses import dataclass, field
3 from typing import (
4 Iterable,
5 Dict,
6 Optional,
7 TYPE_CHECKING,
8 Union,
9 Tuple,
10 List,
11 )
12
13 import numpy as np
14 import weaviate
15
16 from ..base.backend import BaseBackendMixin
17 from ..registry import _REGISTRY
18 from .... import Document
19 from ....helper import dataclass_from_dict, filter_dict
20
21 if TYPE_CHECKING:
22 from ....typing import DocumentArraySourceType, ArrayType
23
24
25 @dataclass
26 class WeaviateConfig:
27 """This class stores the config variables to initialize
28 connection to the Weaviate server"""
29
30 host: Optional[str] = field(default='localhost')
31 port: Optional[int] = field(default=8080)
32 protocol: Optional[str] = field(default='http')
33 name: Optional[str] = None
34 serialize_config: Dict = field(default_factory=dict)
35 n_dim: Optional[int] = None # deprecated, not used anymore since weaviate 1.10
36 ef: Optional[int] = None
37 ef_construction: Optional[int] = None
38 max_connections: Optional[int] = None
39
40
41 class BackendMixin(BaseBackendMixin):
42 """Provide necessary functions to enable this storage backend."""
43
44 def _init_storage(
45 self,
46 _docs: Optional['DocumentArraySourceType'] = None,
47 config: Optional[Union[WeaviateConfig, Dict]] = None,
48 **kwargs,
49 ):
50 """Initialize weaviate storage.
51
52 :param docs: the list of documents to initialize to
53 :param config: the config object used to ininitialize connection to weaviate server
54 :param kwargs: extra keyword arguments
55 :raises ValueError: only one of name or docs can be used for initialization,
56 raise an error if both are provided
57 """
58
59 if not config:
60 config = WeaviateConfig()
61 elif isinstance(config, dict):
62 config = dataclass_from_dict(WeaviateConfig, config)
63
64 self._serialize_config = config.serialize_config
65
66 if config.name and config.name != config.name.capitalize():
67 raise ValueError(
68 'Weaviate class name has to be capitalized. '
69 'Please capitalize when declaring the name field in config.'
70 )
71
72 self._persist = bool(config.name)
73
74 self._client = weaviate.Client(
75 f'{config.protocol}://{config.host}:{config.port}'
76 )
77 self._config = config
78
79 self._schemas = self._load_or_create_weaviate_schema()
80
81 _REGISTRY[self.__class__.__name__][self._class_name].append(self)
82
83 super()._init_storage(_docs, **kwargs)
84
85 # To align with Sqlite behavior; if `docs` is not `None` and table name
86 # is provided, :class:`DocumentArraySqlite` will clear the existing
87 # table and load the given `docs`
88 if _docs is None:
89 return
90 elif isinstance(_docs, Iterable):
91 self.clear()
92 self.extend(_docs)
93 else:
94 self.clear()
95 if isinstance(_docs, Document):
96 self.append(_docs)
97
98 def _get_weaviate_class_name(self) -> str:
99 """Generate the class/schema name using the ``uuid1`` module with some
100 formatting to tailor to weaviate class name convention
101
102 :return: string representing the name of weaviate class/schema name of
103 this :class:`DocumentArrayWeaviate` object
104 """
105 return ''.join([i for i in uuid.uuid1().hex if not i.isdigit()]).capitalize()
106
107 def _get_schema_by_name(self, cls_name: str) -> Dict:
108 """Return the schema dictionary object with the class name
109 Content of the all dictionaries by this method are the same except the name
110 of the weaviate's ``class``
111
112 :param cls_name: the name of the schema/class in weaviate
113 :return: the schema dictionary
114 """
115 # TODO: ideally we should only use one schema. this will allow us to deal with
116 # consistency better
117 hnsw_config = {
118 'ef': self._config.ef,
119 'efConstruction': self._config.ef_construction,
120 'maxConnections': self._config.max_connections,
121 }
122
123 return {
124 'classes': [
125 {
126 'class': cls_name,
127 "vectorizer": "none",
128 'vectorIndexConfig': {'skip': False},
129 'properties': [
130 {
131 'dataType': ['blob'],
132 'name': '_serialized',
133 'indexInverted': False,
134 },
135 ],
136 },
137 {
138 'class': cls_name + 'Meta',
139 "vectorizer": "none",
140 'vectorIndexConfig': {'skip': True, **filter_dict(hnsw_config)},
141 'properties': [
142 {
143 'dataType': ['string[]'],
144 'name': '_offset2ids',
145 'indexInverted': False,
146 },
147 ],
148 },
149 ]
150 }
151
152 def _load_or_create_weaviate_schema(self):
153 """Create a new weaviate schema for this :class:`DocumentArrayWeaviate` object
154 if not present in weaviate or if ``self._config.name`` is None. If ``self._config.name``
155 is provided and not None and schema with the specified name exists in weaviate,
156 then load the object with the given ``self._config.name``
157
158 :return: the schemas of this :class`DocumentArrayWeaviate` object and its meta
159 """
160 if not self._config.name:
161 name_candidate = self._get_weaviate_class_name()
162 doc_schemas = self._get_schema_by_name(name_candidate)
163 while self._client.schema.contains(doc_schemas):
164 name_candidate = self._get_weaviate_class_name()
165 doc_schemas = self._get_schema_by_name(name_candidate)
166 self._client.schema.create(doc_schemas)
167 self._config.name = name_candidate
168 return doc_schemas
169
170 doc_schemas = self._get_schema_by_name(self._config.name)
171 if self._client.schema.contains(doc_schemas):
172 return doc_schemas
173
174 self._client.schema.create(doc_schemas)
175 return doc_schemas
176
177 def _update_offset2ids_meta(self):
178 """Update the offset2ids in weaviate the the current local version"""
179 if self._offset2ids_wid is not None and self._client.data_object.exists(
180 self._offset2ids_wid
181 ):
182 self._client.data_object.update(
183 data_object={'_offset2ids': self._offset2ids.ids},
184 class_name=self._meta_name,
185 uuid=self._offset2ids_wid,
186 )
187 else:
188 self._offset2ids_wid = str(uuid.uuid1())
189 self._client.data_object.create(
190 data_object={'_offset2ids': self._offset2ids.ids},
191 class_name=self._meta_name,
192 uuid=self._offset2ids_wid,
193 )
194
195 def _get_offset2ids_meta(self) -> Tuple[List, str]:
196 """Return the offset2ids stored in weaviate along with the name of the schema/class
197 in weaviate that stores meta information of this object
198
199 :return: a tuple with first element as a list of offset2ids and second element
200 being name of weaviate class/schema of the meta object
201
202 :raises ValueError: error is raised if meta class name is not defined
203 """
204 if not self._meta_name:
205 raise ValueError('meta object is not defined')
206
207 resp = (
208 self._client.query.get(self._meta_name, ['_offset2ids', '_additional {id}'])
209 .do()
210 .get('data', {})
211 .get('Get', {})
212 .get(self._meta_name, [])
213 )
214
215 if not resp:
216 return [], None
217 elif len(resp) == 1:
218 return resp[0]['_offset2ids'], resp[0]['_additional']['id']
219 else:
220 raise ValueError('received multiple meta copies which is invalid')
221
222 @property
223 def name(self):
224 """An alias to _class_name that returns the id/name of the class
225 in the weaviate of this :class:`DocumentArrayWeaviate`
226
227 :return: name of weaviate class/schema of this :class:`DocumentArrayWeaviate`
228 """
229 return self._class_name
230
231 @property
232 def _class_name(self):
233 """Return the name of the class in weaviate of this :class:`DocumentArrayWeaviate
234
235 :return: name of weaviate class/schema of this :class:`DocumentArrayWeaviate`
236 """
237 if not self._schemas:
238 return None
239 return self._schemas['classes'][0]['class']
240
241 @property
242 def _meta_name(self):
243 """Return the name of the class in weaviate that stores the meta information of
244 this :class:`DocumentArrayWeaviate`
245
246 :return: name of weaviate class/schema of class that stores the meta information
247 """
248 # TODO: remove this after we combine the meta info to the DocumentArray class
249 if not self._schemas:
250 return None
251 return self._schemas['classes'][1]['class']
252
253 @property
254 def _class_schema(self) -> Optional[Dict]:
255 """Return the schema dictionary of this :class:`DocumentArrayWeaviate`'s weaviate schema
256
257 :return: the dictionary representing this weaviate schema
258 """
259 if not self._schemas:
260 return None
261 return self._schemas['classes'][0]
262
263 @property
264 def _meta_schema(self):
265 """Return the schema dictionary of this weaviate schema that stores this object's meta
266
267 :return: the dictionary representing a meta object's weaviate schema
268 """
269 if not self._schemas and len(self._schemas) < 2:
270 return None
271 return self._schemas['classes'][1]
272
273 def _doc2weaviate_create_payload(self, value: 'Document'):
274 """Return the payload to store :class:`Document` into weaviate
275
276 :param value: document to create a payload for
277 :return: the payload dictionary
278 """
279 return dict(
280 data_object={'_serialized': value.to_base64(**self._serialize_config)},
281 class_name=self._class_name,
282 uuid=self._map_id(value.id),
283 vector=self._map_embedding(value.embedding),
284 )
285
286 def _map_id(self, doc_id: str):
287 """the function maps doc id to weaviate id
288
289 :param doc_id: id of the document
290 :return: weaviate object id
291 """
292 # appending class name to doc id to handle the case:
293 # daw1 = DocumentArrayWeaviate([Document(id=str(i), text='hi') for i in range(3)])
294 # daw2 = DocumentArrayWeaviate([Document(id=str(i), text='bye') for i in range(3)])
295 # daw2[0, 'text'] == 'hi' # this will be False if we don't append class name
296 return str(uuid.uuid5(uuid.NAMESPACE_URL, doc_id + self._class_name))
297
298 def _map_embedding(self, embedding: 'ArrayType'):
299 if embedding is not None:
300 from ....math.ndarray import to_numpy_array
301
302 embedding = to_numpy_array(embedding)
303
304 if embedding.ndim > 1:
305 embedding = np.asarray(embedding).squeeze()
306
307 # Weaviate expects vector to have dim 2 at least
308 # or get weaviate.exceptions.UnexpectedStatusCodeException: models.C11yVector
309 # hence we cast it to list of a single element
310 if len(embedding) == 1:
311 embedding = [embedding[0]]
312 else:
313 embedding = None
314 return embedding
315
316 def __getstate__(self):
317 d = dict(self.__dict__)
318 del d['_client']
319 return d
320
321 def __setstate__(self, state):
322 self.__dict__ = state
323 self._client = weaviate.Client(
324 f'{state["_config"].protocol}://{state["_config"].host}:{state["_config"].port}'
325 )
326
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docarray/array/storage/weaviate/backend.py b/docarray/array/storage/weaviate/backend.py
--- a/docarray/array/storage/weaviate/backend.py
+++ b/docarray/array/storage/weaviate/backend.py
@@ -13,13 +13,13 @@
import numpy as np
import weaviate
-from ..base.backend import BaseBackendMixin
-from ..registry import _REGISTRY
from .... import Document
from ....helper import dataclass_from_dict, filter_dict
+from ..base.backend import BaseBackendMixin
+from ..registry import _REGISTRY
if TYPE_CHECKING:
- from ....typing import DocumentArraySourceType, ArrayType
+ from ....typing import ArrayType, DocumentArraySourceType
@dataclass
@@ -35,6 +35,7 @@
n_dim: Optional[int] = None # deprecated, not used anymore since weaviate 1.10
ef: Optional[int] = None
ef_construction: Optional[int] = None
+ timeout_config: Optional[Tuple[int, int]] = None
max_connections: Optional[int] = None
@@ -72,7 +73,8 @@
self._persist = bool(config.name)
self._client = weaviate.Client(
- f'{config.protocol}://{config.host}:{config.port}'
+ f'{config.protocol}://{config.host}:{config.port}',
+ timeout_config=config.timeout_config,
)
self._config = config
| {"golden_diff": "diff --git a/docarray/array/storage/weaviate/backend.py b/docarray/array/storage/weaviate/backend.py\n--- a/docarray/array/storage/weaviate/backend.py\n+++ b/docarray/array/storage/weaviate/backend.py\n@@ -13,13 +13,13 @@\n import numpy as np\n import weaviate\n \n-from ..base.backend import BaseBackendMixin\n-from ..registry import _REGISTRY\n from .... import Document\n from ....helper import dataclass_from_dict, filter_dict\n+from ..base.backend import BaseBackendMixin\n+from ..registry import _REGISTRY\n \n if TYPE_CHECKING:\n- from ....typing import DocumentArraySourceType, ArrayType\n+ from ....typing import ArrayType, DocumentArraySourceType\n \n \n @dataclass\n@@ -35,6 +35,7 @@\n n_dim: Optional[int] = None # deprecated, not used anymore since weaviate 1.10\n ef: Optional[int] = None\n ef_construction: Optional[int] = None\n+ timeout_config: Optional[Tuple[int, int]] = None\n max_connections: Optional[int] = None\n \n \n@@ -72,7 +73,8 @@\n self._persist = bool(config.name)\n \n self._client = weaviate.Client(\n- f'{config.protocol}://{config.host}:{config.port}'\n+ f'{config.protocol}://{config.host}:{config.port}',\n+ timeout_config=config.timeout_config,\n )\n self._config = config\n", "issue": "Expose Timeout Parameters in weaviate\nWeaviate storage currently does not allow specifying the Timeout parameters.\r\nAdd the parameter to `WeaviateConfig` and used when creating the weaviate client\n", "before_files": [{"content": "import uuid\nfrom dataclasses import dataclass, field\nfrom typing import (\n Iterable,\n Dict,\n Optional,\n TYPE_CHECKING,\n Union,\n Tuple,\n List,\n)\n\nimport numpy as np\nimport weaviate\n\nfrom ..base.backend import BaseBackendMixin\nfrom ..registry import _REGISTRY\nfrom .... import Document\nfrom ....helper import dataclass_from_dict, filter_dict\n\nif TYPE_CHECKING:\n from ....typing import DocumentArraySourceType, ArrayType\n\n\n@dataclass\nclass WeaviateConfig:\n \"\"\"This class stores the config variables to initialize\n connection to the Weaviate server\"\"\"\n\n host: Optional[str] = field(default='localhost')\n port: Optional[int] = field(default=8080)\n protocol: Optional[str] = field(default='http')\n name: Optional[str] = None\n serialize_config: Dict = field(default_factory=dict)\n n_dim: Optional[int] = None # deprecated, not used anymore since weaviate 1.10\n ef: Optional[int] = None\n ef_construction: Optional[int] = None\n max_connections: Optional[int] = None\n\n\nclass BackendMixin(BaseBackendMixin):\n \"\"\"Provide necessary functions to enable this storage backend.\"\"\"\n\n def _init_storage(\n self,\n _docs: Optional['DocumentArraySourceType'] = None,\n config: Optional[Union[WeaviateConfig, Dict]] = None,\n **kwargs,\n ):\n \"\"\"Initialize weaviate storage.\n\n :param docs: the list of documents to initialize to\n :param config: the config object used to ininitialize connection to weaviate server\n :param kwargs: extra keyword arguments\n :raises ValueError: only one of name or docs can be used for initialization,\n raise an error if both are provided\n \"\"\"\n\n if not config:\n config = WeaviateConfig()\n elif isinstance(config, dict):\n config = dataclass_from_dict(WeaviateConfig, config)\n\n self._serialize_config = config.serialize_config\n\n if config.name and config.name != config.name.capitalize():\n raise ValueError(\n 'Weaviate class name has to be capitalized. 
'\n 'Please capitalize when declaring the name field in config.'\n )\n\n self._persist = bool(config.name)\n\n self._client = weaviate.Client(\n f'{config.protocol}://{config.host}:{config.port}'\n )\n self._config = config\n\n self._schemas = self._load_or_create_weaviate_schema()\n\n _REGISTRY[self.__class__.__name__][self._class_name].append(self)\n\n super()._init_storage(_docs, **kwargs)\n\n # To align with Sqlite behavior; if `docs` is not `None` and table name\n # is provided, :class:`DocumentArraySqlite` will clear the existing\n # table and load the given `docs`\n if _docs is None:\n return\n elif isinstance(_docs, Iterable):\n self.clear()\n self.extend(_docs)\n else:\n self.clear()\n if isinstance(_docs, Document):\n self.append(_docs)\n\n def _get_weaviate_class_name(self) -> str:\n \"\"\"Generate the class/schema name using the ``uuid1`` module with some\n formatting to tailor to weaviate class name convention\n\n :return: string representing the name of weaviate class/schema name of\n this :class:`DocumentArrayWeaviate` object\n \"\"\"\n return ''.join([i for i in uuid.uuid1().hex if not i.isdigit()]).capitalize()\n\n def _get_schema_by_name(self, cls_name: str) -> Dict:\n \"\"\"Return the schema dictionary object with the class name\n Content of the all dictionaries by this method are the same except the name\n of the weaviate's ``class``\n\n :param cls_name: the name of the schema/class in weaviate\n :return: the schema dictionary\n \"\"\"\n # TODO: ideally we should only use one schema. this will allow us to deal with\n # consistency better\n hnsw_config = {\n 'ef': self._config.ef,\n 'efConstruction': self._config.ef_construction,\n 'maxConnections': self._config.max_connections,\n }\n\n return {\n 'classes': [\n {\n 'class': cls_name,\n \"vectorizer\": \"none\",\n 'vectorIndexConfig': {'skip': False},\n 'properties': [\n {\n 'dataType': ['blob'],\n 'name': '_serialized',\n 'indexInverted': False,\n },\n ],\n },\n {\n 'class': cls_name + 'Meta',\n \"vectorizer\": \"none\",\n 'vectorIndexConfig': {'skip': True, **filter_dict(hnsw_config)},\n 'properties': [\n {\n 'dataType': ['string[]'],\n 'name': '_offset2ids',\n 'indexInverted': False,\n },\n ],\n },\n ]\n }\n\n def _load_or_create_weaviate_schema(self):\n \"\"\"Create a new weaviate schema for this :class:`DocumentArrayWeaviate` object\n if not present in weaviate or if ``self._config.name`` is None. 
If ``self._config.name``\n is provided and not None and schema with the specified name exists in weaviate,\n then load the object with the given ``self._config.name``\n\n :return: the schemas of this :class`DocumentArrayWeaviate` object and its meta\n \"\"\"\n if not self._config.name:\n name_candidate = self._get_weaviate_class_name()\n doc_schemas = self._get_schema_by_name(name_candidate)\n while self._client.schema.contains(doc_schemas):\n name_candidate = self._get_weaviate_class_name()\n doc_schemas = self._get_schema_by_name(name_candidate)\n self._client.schema.create(doc_schemas)\n self._config.name = name_candidate\n return doc_schemas\n\n doc_schemas = self._get_schema_by_name(self._config.name)\n if self._client.schema.contains(doc_schemas):\n return doc_schemas\n\n self._client.schema.create(doc_schemas)\n return doc_schemas\n\n def _update_offset2ids_meta(self):\n \"\"\"Update the offset2ids in weaviate the the current local version\"\"\"\n if self._offset2ids_wid is not None and self._client.data_object.exists(\n self._offset2ids_wid\n ):\n self._client.data_object.update(\n data_object={'_offset2ids': self._offset2ids.ids},\n class_name=self._meta_name,\n uuid=self._offset2ids_wid,\n )\n else:\n self._offset2ids_wid = str(uuid.uuid1())\n self._client.data_object.create(\n data_object={'_offset2ids': self._offset2ids.ids},\n class_name=self._meta_name,\n uuid=self._offset2ids_wid,\n )\n\n def _get_offset2ids_meta(self) -> Tuple[List, str]:\n \"\"\"Return the offset2ids stored in weaviate along with the name of the schema/class\n in weaviate that stores meta information of this object\n\n :return: a tuple with first element as a list of offset2ids and second element\n being name of weaviate class/schema of the meta object\n\n :raises ValueError: error is raised if meta class name is not defined\n \"\"\"\n if not self._meta_name:\n raise ValueError('meta object is not defined')\n\n resp = (\n self._client.query.get(self._meta_name, ['_offset2ids', '_additional {id}'])\n .do()\n .get('data', {})\n .get('Get', {})\n .get(self._meta_name, [])\n )\n\n if not resp:\n return [], None\n elif len(resp) == 1:\n return resp[0]['_offset2ids'], resp[0]['_additional']['id']\n else:\n raise ValueError('received multiple meta copies which is invalid')\n\n @property\n def name(self):\n \"\"\"An alias to _class_name that returns the id/name of the class\n in the weaviate of this :class:`DocumentArrayWeaviate`\n\n :return: name of weaviate class/schema of this :class:`DocumentArrayWeaviate`\n \"\"\"\n return self._class_name\n\n @property\n def _class_name(self):\n \"\"\"Return the name of the class in weaviate of this :class:`DocumentArrayWeaviate\n\n :return: name of weaviate class/schema of this :class:`DocumentArrayWeaviate`\n \"\"\"\n if not self._schemas:\n return None\n return self._schemas['classes'][0]['class']\n\n @property\n def _meta_name(self):\n \"\"\"Return the name of the class in weaviate that stores the meta information of\n this :class:`DocumentArrayWeaviate`\n\n :return: name of weaviate class/schema of class that stores the meta information\n \"\"\"\n # TODO: remove this after we combine the meta info to the DocumentArray class\n if not self._schemas:\n return None\n return self._schemas['classes'][1]['class']\n\n @property\n def _class_schema(self) -> Optional[Dict]:\n \"\"\"Return the schema dictionary of this :class:`DocumentArrayWeaviate`'s weaviate schema\n\n :return: the dictionary representing this weaviate schema\n \"\"\"\n if not self._schemas:\n return None\n 
return self._schemas['classes'][0]\n\n @property\n def _meta_schema(self):\n \"\"\"Return the schema dictionary of this weaviate schema that stores this object's meta\n\n :return: the dictionary representing a meta object's weaviate schema\n \"\"\"\n if not self._schemas and len(self._schemas) < 2:\n return None\n return self._schemas['classes'][1]\n\n def _doc2weaviate_create_payload(self, value: 'Document'):\n \"\"\"Return the payload to store :class:`Document` into weaviate\n\n :param value: document to create a payload for\n :return: the payload dictionary\n \"\"\"\n return dict(\n data_object={'_serialized': value.to_base64(**self._serialize_config)},\n class_name=self._class_name,\n uuid=self._map_id(value.id),\n vector=self._map_embedding(value.embedding),\n )\n\n def _map_id(self, doc_id: str):\n \"\"\"the function maps doc id to weaviate id\n\n :param doc_id: id of the document\n :return: weaviate object id\n \"\"\"\n # appending class name to doc id to handle the case:\n # daw1 = DocumentArrayWeaviate([Document(id=str(i), text='hi') for i in range(3)])\n # daw2 = DocumentArrayWeaviate([Document(id=str(i), text='bye') for i in range(3)])\n # daw2[0, 'text'] == 'hi' # this will be False if we don't append class name\n return str(uuid.uuid5(uuid.NAMESPACE_URL, doc_id + self._class_name))\n\n def _map_embedding(self, embedding: 'ArrayType'):\n if embedding is not None:\n from ....math.ndarray import to_numpy_array\n\n embedding = to_numpy_array(embedding)\n\n if embedding.ndim > 1:\n embedding = np.asarray(embedding).squeeze()\n\n # Weaviate expects vector to have dim 2 at least\n # or get weaviate.exceptions.UnexpectedStatusCodeException: models.C11yVector\n # hence we cast it to list of a single element\n if len(embedding) == 1:\n embedding = [embedding[0]]\n else:\n embedding = None\n return embedding\n\n def __getstate__(self):\n d = dict(self.__dict__)\n del d['_client']\n return d\n\n def __setstate__(self, state):\n self.__dict__ = state\n self._client = weaviate.Client(\n f'{state[\"_config\"].protocol}://{state[\"_config\"].host}:{state[\"_config\"].port}'\n )\n", "path": "docarray/array/storage/weaviate/backend.py"}], "after_files": [{"content": "import uuid\nfrom dataclasses import dataclass, field\nfrom typing import (\n Iterable,\n Dict,\n Optional,\n TYPE_CHECKING,\n Union,\n Tuple,\n List,\n)\n\nimport numpy as np\nimport weaviate\n\nfrom .... 
import Document\nfrom ....helper import dataclass_from_dict, filter_dict\nfrom ..base.backend import BaseBackendMixin\nfrom ..registry import _REGISTRY\n\nif TYPE_CHECKING:\n from ....typing import ArrayType, DocumentArraySourceType\n\n\n@dataclass\nclass WeaviateConfig:\n \"\"\"This class stores the config variables to initialize\n connection to the Weaviate server\"\"\"\n\n host: Optional[str] = field(default='localhost')\n port: Optional[int] = field(default=8080)\n protocol: Optional[str] = field(default='http')\n name: Optional[str] = None\n serialize_config: Dict = field(default_factory=dict)\n n_dim: Optional[int] = None # deprecated, not used anymore since weaviate 1.10\n ef: Optional[int] = None\n ef_construction: Optional[int] = None\n timeout_config: Optional[Tuple[int, int]] = None\n max_connections: Optional[int] = None\n\n\nclass BackendMixin(BaseBackendMixin):\n \"\"\"Provide necessary functions to enable this storage backend.\"\"\"\n\n def _init_storage(\n self,\n _docs: Optional['DocumentArraySourceType'] = None,\n config: Optional[Union[WeaviateConfig, Dict]] = None,\n **kwargs,\n ):\n \"\"\"Initialize weaviate storage.\n\n :param docs: the list of documents to initialize to\n :param config: the config object used to ininitialize connection to weaviate server\n :param kwargs: extra keyword arguments\n :raises ValueError: only one of name or docs can be used for initialization,\n raise an error if both are provided\n \"\"\"\n\n if not config:\n config = WeaviateConfig()\n elif isinstance(config, dict):\n config = dataclass_from_dict(WeaviateConfig, config)\n\n self._serialize_config = config.serialize_config\n\n if config.name and config.name != config.name.capitalize():\n raise ValueError(\n 'Weaviate class name has to be capitalized. '\n 'Please capitalize when declaring the name field in config.'\n )\n\n self._persist = bool(config.name)\n\n self._client = weaviate.Client(\n f'{config.protocol}://{config.host}:{config.port}',\n timeout_config=config.timeout_config,\n )\n self._config = config\n\n self._schemas = self._load_or_create_weaviate_schema()\n\n _REGISTRY[self.__class__.__name__][self._class_name].append(self)\n\n super()._init_storage(_docs, **kwargs)\n\n # To align with Sqlite behavior; if `docs` is not `None` and table name\n # is provided, :class:`DocumentArraySqlite` will clear the existing\n # table and load the given `docs`\n if _docs is None:\n return\n elif isinstance(_docs, Iterable):\n self.clear()\n self.extend(_docs)\n else:\n self.clear()\n if isinstance(_docs, Document):\n self.append(_docs)\n\n def _get_weaviate_class_name(self) -> str:\n \"\"\"Generate the class/schema name using the ``uuid1`` module with some\n formatting to tailor to weaviate class name convention\n\n :return: string representing the name of weaviate class/schema name of\n this :class:`DocumentArrayWeaviate` object\n \"\"\"\n return ''.join([i for i in uuid.uuid1().hex if not i.isdigit()]).capitalize()\n\n def _get_schema_by_name(self, cls_name: str) -> Dict:\n \"\"\"Return the schema dictionary object with the class name\n Content of the all dictionaries by this method are the same except the name\n of the weaviate's ``class``\n\n :param cls_name: the name of the schema/class in weaviate\n :return: the schema dictionary\n \"\"\"\n # TODO: ideally we should only use one schema. 
this will allow us to deal with\n # consistency better\n hnsw_config = {\n 'ef': self._config.ef,\n 'efConstruction': self._config.ef_construction,\n 'maxConnections': self._config.max_connections,\n }\n\n return {\n 'classes': [\n {\n 'class': cls_name,\n \"vectorizer\": \"none\",\n 'vectorIndexConfig': {'skip': False},\n 'properties': [\n {\n 'dataType': ['blob'],\n 'name': '_serialized',\n 'indexInverted': False,\n },\n ],\n },\n {\n 'class': cls_name + 'Meta',\n \"vectorizer\": \"none\",\n 'vectorIndexConfig': {'skip': True, **filter_dict(hnsw_config)},\n 'properties': [\n {\n 'dataType': ['string[]'],\n 'name': '_offset2ids',\n 'indexInverted': False,\n },\n ],\n },\n ]\n }\n\n def _load_or_create_weaviate_schema(self):\n \"\"\"Create a new weaviate schema for this :class:`DocumentArrayWeaviate` object\n if not present in weaviate or if ``self._config.name`` is None. If ``self._config.name``\n is provided and not None and schema with the specified name exists in weaviate,\n then load the object with the given ``self._config.name``\n\n :return: the schemas of this :class`DocumentArrayWeaviate` object and its meta\n \"\"\"\n if not self._config.name:\n name_candidate = self._get_weaviate_class_name()\n doc_schemas = self._get_schema_by_name(name_candidate)\n while self._client.schema.contains(doc_schemas):\n name_candidate = self._get_weaviate_class_name()\n doc_schemas = self._get_schema_by_name(name_candidate)\n self._client.schema.create(doc_schemas)\n self._config.name = name_candidate\n return doc_schemas\n\n doc_schemas = self._get_schema_by_name(self._config.name)\n if self._client.schema.contains(doc_schemas):\n return doc_schemas\n\n self._client.schema.create(doc_schemas)\n return doc_schemas\n\n def _update_offset2ids_meta(self):\n \"\"\"Update the offset2ids in weaviate the the current local version\"\"\"\n if self._offset2ids_wid is not None and self._client.data_object.exists(\n self._offset2ids_wid\n ):\n self._client.data_object.update(\n data_object={'_offset2ids': self._offset2ids.ids},\n class_name=self._meta_name,\n uuid=self._offset2ids_wid,\n )\n else:\n self._offset2ids_wid = str(uuid.uuid1())\n self._client.data_object.create(\n data_object={'_offset2ids': self._offset2ids.ids},\n class_name=self._meta_name,\n uuid=self._offset2ids_wid,\n )\n\n def _get_offset2ids_meta(self) -> Tuple[List, str]:\n \"\"\"Return the offset2ids stored in weaviate along with the name of the schema/class\n in weaviate that stores meta information of this object\n\n :return: a tuple with first element as a list of offset2ids and second element\n being name of weaviate class/schema of the meta object\n\n :raises ValueError: error is raised if meta class name is not defined\n \"\"\"\n if not self._meta_name:\n raise ValueError('meta object is not defined')\n\n resp = (\n self._client.query.get(self._meta_name, ['_offset2ids', '_additional {id}'])\n .do()\n .get('data', {})\n .get('Get', {})\n .get(self._meta_name, [])\n )\n\n if not resp:\n return [], None\n elif len(resp) == 1:\n return resp[0]['_offset2ids'], resp[0]['_additional']['id']\n else:\n raise ValueError('received multiple meta copies which is invalid')\n\n @property\n def name(self):\n \"\"\"An alias to _class_name that returns the id/name of the class\n in the weaviate of this :class:`DocumentArrayWeaviate`\n\n :return: name of weaviate class/schema of this :class:`DocumentArrayWeaviate`\n \"\"\"\n return self._class_name\n\n @property\n def _class_name(self):\n \"\"\"Return the name of the class in weaviate of this 
:class:`DocumentArrayWeaviate\n\n :return: name of weaviate class/schema of this :class:`DocumentArrayWeaviate`\n \"\"\"\n if not self._schemas:\n return None\n return self._schemas['classes'][0]['class']\n\n @property\n def _meta_name(self):\n \"\"\"Return the name of the class in weaviate that stores the meta information of\n this :class:`DocumentArrayWeaviate`\n\n :return: name of weaviate class/schema of class that stores the meta information\n \"\"\"\n # TODO: remove this after we combine the meta info to the DocumentArray class\n if not self._schemas:\n return None\n return self._schemas['classes'][1]['class']\n\n @property\n def _class_schema(self) -> Optional[Dict]:\n \"\"\"Return the schema dictionary of this :class:`DocumentArrayWeaviate`'s weaviate schema\n\n :return: the dictionary representing this weaviate schema\n \"\"\"\n if not self._schemas:\n return None\n return self._schemas['classes'][0]\n\n @property\n def _meta_schema(self):\n \"\"\"Return the schema dictionary of this weaviate schema that stores this object's meta\n\n :return: the dictionary representing a meta object's weaviate schema\n \"\"\"\n if not self._schemas and len(self._schemas) < 2:\n return None\n return self._schemas['classes'][1]\n\n def _doc2weaviate_create_payload(self, value: 'Document'):\n \"\"\"Return the payload to store :class:`Document` into weaviate\n\n :param value: document to create a payload for\n :return: the payload dictionary\n \"\"\"\n return dict(\n data_object={'_serialized': value.to_base64(**self._serialize_config)},\n class_name=self._class_name,\n uuid=self._map_id(value.id),\n vector=self._map_embedding(value.embedding),\n )\n\n def _map_id(self, doc_id: str):\n \"\"\"the function maps doc id to weaviate id\n\n :param doc_id: id of the document\n :return: weaviate object id\n \"\"\"\n # appending class name to doc id to handle the case:\n # daw1 = DocumentArrayWeaviate([Document(id=str(i), text='hi') for i in range(3)])\n # daw2 = DocumentArrayWeaviate([Document(id=str(i), text='bye') for i in range(3)])\n # daw2[0, 'text'] == 'hi' # this will be False if we don't append class name\n return str(uuid.uuid5(uuid.NAMESPACE_URL, doc_id + self._class_name))\n\n def _map_embedding(self, embedding: 'ArrayType'):\n if embedding is not None:\n from ....math.ndarray import to_numpy_array\n\n embedding = to_numpy_array(embedding)\n\n if embedding.ndim > 1:\n embedding = np.asarray(embedding).squeeze()\n\n # Weaviate expects vector to have dim 2 at least\n # or get weaviate.exceptions.UnexpectedStatusCodeException: models.C11yVector\n # hence we cast it to list of a single element\n if len(embedding) == 1:\n embedding = [embedding[0]]\n else:\n embedding = None\n return embedding\n\n def __getstate__(self):\n d = dict(self.__dict__)\n del d['_client']\n return d\n\n def __setstate__(self, state):\n self.__dict__ = state\n self._client = weaviate.Client(\n f'{state[\"_config\"].protocol}://{state[\"_config\"].host}:{state[\"_config\"].port}'\n )\n", "path": "docarray/array/storage/weaviate/backend.py"}]} | 3,835 | 322 |
gh_patches_debug_6829 | rasdani/github-patches | git_diff | zulip__zulip-23329 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Rename Recent topics to Recent conversations
Once #19449 has been resolved, we should rename Recent topics to Recent conversations across the board. This includes the left sidebar, documentation, settings menus, marketing pages, and anywhere else this term may be used.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/lib/onboarding.py`
Content:
```
1 from typing import Dict, List
2
3 from django.conf import settings
4 from django.db import transaction
5 from django.db.models import Count
6 from django.utils.translation import gettext as _
7 from django.utils.translation import override as override_language
8
9 from zerver.actions.create_realm import setup_realm_internal_bots
10 from zerver.actions.message_send import (
11 do_send_messages,
12 internal_prep_stream_message_by_name,
13 internal_send_private_message,
14 )
15 from zerver.actions.reactions import do_add_reaction
16 from zerver.lib.emoji import emoji_name_to_emoji_code
17 from zerver.lib.message import SendMessageRequest
18 from zerver.models import Message, Realm, UserProfile, get_system_bot
19
20
21 def missing_any_realm_internal_bots() -> bool:
22 bot_emails = [
23 bot["email_template"] % (settings.INTERNAL_BOT_DOMAIN,)
24 for bot in settings.REALM_INTERNAL_BOTS
25 ]
26 bot_counts = {
27 email: count
28 for email, count in UserProfile.objects.filter(email__in=bot_emails)
29 .values_list("email")
30 .annotate(Count("id"))
31 }
32 realm_count = Realm.objects.count()
33 return any(bot_counts.get(email, 0) < realm_count for email in bot_emails)
34
35
36 def create_if_missing_realm_internal_bots() -> None:
37 """This checks if there is any realm internal bot missing.
38
39 If that is the case, it creates the missing realm internal bots.
40 """
41 if missing_any_realm_internal_bots():
42 for realm in Realm.objects.all():
43 setup_realm_internal_bots(realm)
44
45
46 def send_initial_pms(user: UserProfile) -> None:
47 organization_setup_text = ""
48
49 # We need to override the language in this code path, because it's
50 # called from account registration, which is a pre-account API
51 # request and thus may not have the user's language context yet.
52 with override_language(user.default_language):
53 if user.is_realm_admin:
54 help_url = user.realm.uri + "/help/getting-your-organization-started-with-zulip"
55 organization_setup_text = (
56 " " + _("We also have a guide for [Setting up your organization]({help_url}).")
57 ).format(help_url=help_url)
58
59 welcome_msg = _("Hello, and welcome to Zulip!") + "👋"
60 demo_org_warning = ""
61 if user.realm.demo_organization_scheduled_deletion_date is not None:
62 demo_org_warning = (
63 _(
64 "Note that this is a [demo organization]({demo_org_help_url}) and will be "
65 "**automatically deleted** in 30 days."
66 )
67 + "\n\n"
68 )
69
70 content = "".join(
71 [
72 welcome_msg + " ",
73 _("This is a private message from me, Welcome Bot.") + "\n\n",
74 _(
75 "If you are new to Zulip, check out our [Getting started guide]({getting_started_url})!"
76 ),
77 "{organization_setup_text}" + "\n\n",
78 "{demo_org_warning}",
79 _(
80 "I can also help you get set up! Just click anywhere on this message or press `r` to reply."
81 )
82 + "\n\n",
83 _("Here are a few messages I understand:") + " ",
84 bot_commands(),
85 ]
86 )
87
88 content = content.format(
89 organization_setup_text=organization_setup_text,
90 demo_org_warning=demo_org_warning,
91 demo_org_help_url="/help/demo-organizations",
92 getting_started_url="/help/getting-started-with-zulip",
93 )
94
95 internal_send_private_message(
96 get_system_bot(settings.WELCOME_BOT, user.realm_id), user, content
97 )
98
99
100 def bot_commands(no_help_command: bool = False) -> str:
101 commands = [
102 "apps",
103 "profile",
104 "theme",
105 "streams",
106 "topics",
107 "message formatting",
108 "keyboard shortcuts",
109 ]
110 if not no_help_command:
111 commands.append("help")
112 return ", ".join(["`" + command + "`" for command in commands]) + "."
113
114
115 def select_welcome_bot_response(human_response_lower: str) -> str:
116 # Given the raw (pre-markdown-rendering) content for a private
117 # message from the user to Welcome Bot, select the appropriate reply.
118 if human_response_lower in ["app", "apps"]:
119 return _(
120 "You can [download](/apps) the [mobile and desktop apps](/apps). "
121 "Zulip also works great in a browser."
122 )
123 elif human_response_lower == "profile":
124 return _(
125 "Go to [Profile settings](#settings/profile) "
126 "to add a [profile picture](/help/change-your-profile-picture) "
127 "and edit your [profile information](/help/edit-your-profile)."
128 )
129 elif human_response_lower == "theme":
130 return _(
131 "Go to [Display settings](#settings/display-settings) "
132 "to [switch between the light and dark themes](/help/dark-theme), "
133 "[pick your favorite emoji theme](/help/emoji-and-emoticons#change-your-emoji-set), "
134 "[change your language](/help/change-your-language), "
135 "and make other tweaks to your Zulip experience."
136 )
137 elif human_response_lower in ["stream", "streams", "channel", "channels"]:
138 return "".join(
139 [
140 _(
141 "In Zulip, streams [determine who gets a message](/help/streams-and-topics). "
142 "They are similar to channels in other chat apps."
143 )
144 + "\n\n",
145 _("[Browse and subscribe to streams](#streams/all)."),
146 ]
147 )
148 elif human_response_lower in ["topic", "topics"]:
149 return "".join(
150 [
151 _(
152 "In Zulip, topics [tell you what a message is about](/help/streams-and-topics). "
153 "They are light-weight subjects, very similar to the subject line of an email."
154 )
155 + "\n\n",
156 _(
157 "Check out [Recent conversations](#recent_topics) to see what's happening! "
158 'You can return to this conversation by clicking "Private messages" in the upper left.'
159 ),
160 ]
161 )
162 elif human_response_lower in ["keyboard", "shortcuts", "keyboard shortcuts"]:
163 return "".join(
164 [
165 _(
166 "Zulip's [keyboard shortcuts](#keyboard-shortcuts) "
167 "let you navigate the app quickly and efficiently."
168 )
169 + "\n\n",
170 _("Press `?` any time to see a [cheat sheet](#keyboard-shortcuts)."),
171 ]
172 )
173 elif human_response_lower in ["formatting", "message formatting"]:
174 return "".join(
175 [
176 _(
177 "Zulip uses [Markdown](/help/format-your-message-using-markdown), "
178 "an intuitive format for **bold**, *italics*, bulleted lists, and more. "
179 "Click [here](#message-formatting) for a cheat sheet."
180 )
181 + "\n\n",
182 _(
183 "Check out our [messaging tips](/help/messaging-tips) "
184 "to learn about emoji reactions, code blocks and much more!"
185 ),
186 ]
187 )
188 elif human_response_lower in ["help", "?"]:
189 return "".join(
190 [
191 _("Here are a few messages I understand:") + " ",
192 bot_commands(no_help_command=True) + "\n\n",
193 _(
194 "Check out our [Getting started guide](/help/getting-started-with-zulip), "
195 "or browse the [Help center](/help/) to learn more!"
196 ),
197 ]
198 )
199 else:
200 return "".join(
201 [
202 _(
203 "I’m sorry, I did not understand your message. Please try one of the following commands:"
204 )
205 + " ",
206 bot_commands(),
207 ]
208 )
209
210
211 def send_welcome_bot_response(send_request: SendMessageRequest) -> None:
212 """Given the send_request object for a private message from the user
213 to welcome-bot, trigger the welcome-bot reply."""
214 welcome_bot = get_system_bot(settings.WELCOME_BOT, send_request.message.sender.realm_id)
215 human_response_lower = send_request.message.content.lower()
216 content = select_welcome_bot_response(human_response_lower)
217 internal_send_private_message(welcome_bot, send_request.message.sender, content)
218
219
220 @transaction.atomic
221 def send_initial_realm_messages(realm: Realm) -> None:
222 welcome_bot = get_system_bot(settings.WELCOME_BOT, realm.id)
223 # Make sure each stream created in the realm creation process has at least one message below
224 # Order corresponds to the ordering of the streams on the left sidebar, to make the initial Home
225 # view slightly less overwhelming
226 content_of_private_streams_topic = (
227 _("This is a private stream, as indicated by the lock icon next to the stream name.")
228 + " "
229 + _("Private streams are only visible to stream members.")
230 + "\n"
231 "\n"
232 + _(
233 "To manage this stream, go to [Stream settings]({stream_settings_url}) "
234 "and click on `{initial_private_stream_name}`."
235 )
236 ).format(
237 stream_settings_url="#streams/subscribed",
238 initial_private_stream_name=Realm.INITIAL_PRIVATE_STREAM_NAME,
239 )
240
241 content1_of_topic_demonstration_topic = (
242 _(
243 "This is a message on stream #**{default_notification_stream_name}** with the "
244 "topic `topic demonstration`."
245 )
246 ).format(default_notification_stream_name=Realm.DEFAULT_NOTIFICATION_STREAM_NAME)
247
248 content2_of_topic_demonstration_topic = (
249 _("Topics are a lightweight tool to keep conversations organized.")
250 + " "
251 + _("You can learn more about topics at [Streams and topics]({about_topics_help_url}).")
252 ).format(about_topics_help_url="/help/streams-and-topics")
253
254 content_of_swimming_turtles_topic = (
255 _(
256 "This is a message on stream #**{default_notification_stream_name}** with the "
257 "topic `swimming turtles`."
258 )
259 + "\n"
260 "\n"
261 "[](/static/images/cute/turtle.png)"
262 "\n"
263 "\n"
264 + _(
265 "[Start a new topic]({start_topic_help_url}) any time you're not replying to a \
266 previous message."
267 )
268 ).format(
269 default_notification_stream_name=Realm.DEFAULT_NOTIFICATION_STREAM_NAME,
270 start_topic_help_url="/help/start-a-new-topic",
271 )
272
273 welcome_messages: List[Dict[str, str]] = [
274 {
275 "stream": Realm.INITIAL_PRIVATE_STREAM_NAME,
276 "topic": "private streams",
277 "content": content_of_private_streams_topic,
278 },
279 {
280 "stream": Realm.DEFAULT_NOTIFICATION_STREAM_NAME,
281 "topic": "topic demonstration",
282 "content": content1_of_topic_demonstration_topic,
283 },
284 {
285 "stream": Realm.DEFAULT_NOTIFICATION_STREAM_NAME,
286 "topic": "topic demonstration",
287 "content": content2_of_topic_demonstration_topic,
288 },
289 {
290 "stream": realm.DEFAULT_NOTIFICATION_STREAM_NAME,
291 "topic": "swimming turtles",
292 "content": content_of_swimming_turtles_topic,
293 },
294 ]
295
296 messages = [
297 internal_prep_stream_message_by_name(
298 realm,
299 welcome_bot,
300 message["stream"],
301 message["topic"],
302 message["content"],
303 )
304 for message in welcome_messages
305 ]
306 message_ids = do_send_messages(messages)
307
308 # We find the one of our just-sent messages with turtle.png in it,
309 # and react to it. This is a bit hacky, but works and is kinda a
310 # 1-off thing.
311 turtle_message = Message.objects.select_for_update().get(
312 id__in=message_ids, content__icontains="cute/turtle.png"
313 )
314 (emoji_code, reaction_type) = emoji_name_to_emoji_code(realm, "turtle")
315 do_add_reaction(welcome_bot, turtle_message, "turtle", emoji_code, reaction_type)
316
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zerver/lib/onboarding.py b/zerver/lib/onboarding.py
--- a/zerver/lib/onboarding.py
+++ b/zerver/lib/onboarding.py
@@ -154,7 +154,7 @@
)
+ "\n\n",
_(
- "Check out [Recent conversations](#recent_topics) to see what's happening! "
+ "Check out [Recent conversations](#recent) to see what's happening! "
'You can return to this conversation by clicking "Private messages" in the upper left.'
),
]
| {"golden_diff": "diff --git a/zerver/lib/onboarding.py b/zerver/lib/onboarding.py\n--- a/zerver/lib/onboarding.py\n+++ b/zerver/lib/onboarding.py\n@@ -154,7 +154,7 @@\n )\n + \"\\n\\n\",\n _(\n- \"Check out [Recent conversations](#recent_topics) to see what's happening! \"\n+ \"Check out [Recent conversations](#recent) to see what's happening! \"\n 'You can return to this conversation by clicking \"Private messages\" in the upper left.'\n ),\n ]\n", "issue": "Rename Recent topics to Recent conversations\nOnce #19449 has been resolved, we should rename Recent topics to Recent conversations across the board. This includes the left sidebar, documentation, settings menus, marketing pages, and anywhere else this term may be used.\n", "before_files": [{"content": "from typing import Dict, List\n\nfrom django.conf import settings\nfrom django.db import transaction\nfrom django.db.models import Count\nfrom django.utils.translation import gettext as _\nfrom django.utils.translation import override as override_language\n\nfrom zerver.actions.create_realm import setup_realm_internal_bots\nfrom zerver.actions.message_send import (\n do_send_messages,\n internal_prep_stream_message_by_name,\n internal_send_private_message,\n)\nfrom zerver.actions.reactions import do_add_reaction\nfrom zerver.lib.emoji import emoji_name_to_emoji_code\nfrom zerver.lib.message import SendMessageRequest\nfrom zerver.models import Message, Realm, UserProfile, get_system_bot\n\n\ndef missing_any_realm_internal_bots() -> bool:\n bot_emails = [\n bot[\"email_template\"] % (settings.INTERNAL_BOT_DOMAIN,)\n for bot in settings.REALM_INTERNAL_BOTS\n ]\n bot_counts = {\n email: count\n for email, count in UserProfile.objects.filter(email__in=bot_emails)\n .values_list(\"email\")\n .annotate(Count(\"id\"))\n }\n realm_count = Realm.objects.count()\n return any(bot_counts.get(email, 0) < realm_count for email in bot_emails)\n\n\ndef create_if_missing_realm_internal_bots() -> None:\n \"\"\"This checks if there is any realm internal bot missing.\n\n If that is the case, it creates the missing realm internal bots.\n \"\"\"\n if missing_any_realm_internal_bots():\n for realm in Realm.objects.all():\n setup_realm_internal_bots(realm)\n\n\ndef send_initial_pms(user: UserProfile) -> None:\n organization_setup_text = \"\"\n\n # We need to override the language in this code path, because it's\n # called from account registration, which is a pre-account API\n # request and thus may not have the user's language context yet.\n with override_language(user.default_language):\n if user.is_realm_admin:\n help_url = user.realm.uri + \"/help/getting-your-organization-started-with-zulip\"\n organization_setup_text = (\n \" \" + _(\"We also have a guide for [Setting up your organization]({help_url}).\")\n ).format(help_url=help_url)\n\n welcome_msg = _(\"Hello, and welcome to Zulip!\") + \"\ud83d\udc4b\"\n demo_org_warning = \"\"\n if user.realm.demo_organization_scheduled_deletion_date is not None:\n demo_org_warning = (\n _(\n \"Note that this is a [demo organization]({demo_org_help_url}) and will be \"\n \"**automatically deleted** in 30 days.\"\n )\n + \"\\n\\n\"\n )\n\n content = \"\".join(\n [\n welcome_msg + \" \",\n _(\"This is a private message from me, Welcome Bot.\") + \"\\n\\n\",\n _(\n \"If you are new to Zulip, check out our [Getting started guide]({getting_started_url})!\"\n ),\n \"{organization_setup_text}\" + \"\\n\\n\",\n \"{demo_org_warning}\",\n _(\n \"I can also help you get set up! 
Just click anywhere on this message or press `r` to reply.\"\n )\n + \"\\n\\n\",\n _(\"Here are a few messages I understand:\") + \" \",\n bot_commands(),\n ]\n )\n\n content = content.format(\n organization_setup_text=organization_setup_text,\n demo_org_warning=demo_org_warning,\n demo_org_help_url=\"/help/demo-organizations\",\n getting_started_url=\"/help/getting-started-with-zulip\",\n )\n\n internal_send_private_message(\n get_system_bot(settings.WELCOME_BOT, user.realm_id), user, content\n )\n\n\ndef bot_commands(no_help_command: bool = False) -> str:\n commands = [\n \"apps\",\n \"profile\",\n \"theme\",\n \"streams\",\n \"topics\",\n \"message formatting\",\n \"keyboard shortcuts\",\n ]\n if not no_help_command:\n commands.append(\"help\")\n return \", \".join([\"`\" + command + \"`\" for command in commands]) + \".\"\n\n\ndef select_welcome_bot_response(human_response_lower: str) -> str:\n # Given the raw (pre-markdown-rendering) content for a private\n # message from the user to Welcome Bot, select the appropriate reply.\n if human_response_lower in [\"app\", \"apps\"]:\n return _(\n \"You can [download](/apps) the [mobile and desktop apps](/apps). \"\n \"Zulip also works great in a browser.\"\n )\n elif human_response_lower == \"profile\":\n return _(\n \"Go to [Profile settings](#settings/profile) \"\n \"to add a [profile picture](/help/change-your-profile-picture) \"\n \"and edit your [profile information](/help/edit-your-profile).\"\n )\n elif human_response_lower == \"theme\":\n return _(\n \"Go to [Display settings](#settings/display-settings) \"\n \"to [switch between the light and dark themes](/help/dark-theme), \"\n \"[pick your favorite emoji theme](/help/emoji-and-emoticons#change-your-emoji-set), \"\n \"[change your language](/help/change-your-language), \"\n \"and make other tweaks to your Zulip experience.\"\n )\n elif human_response_lower in [\"stream\", \"streams\", \"channel\", \"channels\"]:\n return \"\".join(\n [\n _(\n \"In Zulip, streams [determine who gets a message](/help/streams-and-topics). \"\n \"They are similar to channels in other chat apps.\"\n )\n + \"\\n\\n\",\n _(\"[Browse and subscribe to streams](#streams/all).\"),\n ]\n )\n elif human_response_lower in [\"topic\", \"topics\"]:\n return \"\".join(\n [\n _(\n \"In Zulip, topics [tell you what a message is about](/help/streams-and-topics). \"\n \"They are light-weight subjects, very similar to the subject line of an email.\"\n )\n + \"\\n\\n\",\n _(\n \"Check out [Recent conversations](#recent_topics) to see what's happening! \"\n 'You can return to this conversation by clicking \"Private messages\" in the upper left.'\n ),\n ]\n )\n elif human_response_lower in [\"keyboard\", \"shortcuts\", \"keyboard shortcuts\"]:\n return \"\".join(\n [\n _(\n \"Zulip's [keyboard shortcuts](#keyboard-shortcuts) \"\n \"let you navigate the app quickly and efficiently.\"\n )\n + \"\\n\\n\",\n _(\"Press `?` any time to see a [cheat sheet](#keyboard-shortcuts).\"),\n ]\n )\n elif human_response_lower in [\"formatting\", \"message formatting\"]:\n return \"\".join(\n [\n _(\n \"Zulip uses [Markdown](/help/format-your-message-using-markdown), \"\n \"an intuitive format for **bold**, *italics*, bulleted lists, and more. 
\"\n \"Click [here](#message-formatting) for a cheat sheet.\"\n )\n + \"\\n\\n\",\n _(\n \"Check out our [messaging tips](/help/messaging-tips) \"\n \"to learn about emoji reactions, code blocks and much more!\"\n ),\n ]\n )\n elif human_response_lower in [\"help\", \"?\"]:\n return \"\".join(\n [\n _(\"Here are a few messages I understand:\") + \" \",\n bot_commands(no_help_command=True) + \"\\n\\n\",\n _(\n \"Check out our [Getting started guide](/help/getting-started-with-zulip), \"\n \"or browse the [Help center](/help/) to learn more!\"\n ),\n ]\n )\n else:\n return \"\".join(\n [\n _(\n \"I\u2019m sorry, I did not understand your message. Please try one of the following commands:\"\n )\n + \" \",\n bot_commands(),\n ]\n )\n\n\ndef send_welcome_bot_response(send_request: SendMessageRequest) -> None:\n \"\"\"Given the send_request object for a private message from the user\n to welcome-bot, trigger the welcome-bot reply.\"\"\"\n welcome_bot = get_system_bot(settings.WELCOME_BOT, send_request.message.sender.realm_id)\n human_response_lower = send_request.message.content.lower()\n content = select_welcome_bot_response(human_response_lower)\n internal_send_private_message(welcome_bot, send_request.message.sender, content)\n\n\[email protected]\ndef send_initial_realm_messages(realm: Realm) -> None:\n welcome_bot = get_system_bot(settings.WELCOME_BOT, realm.id)\n # Make sure each stream created in the realm creation process has at least one message below\n # Order corresponds to the ordering of the streams on the left sidebar, to make the initial Home\n # view slightly less overwhelming\n content_of_private_streams_topic = (\n _(\"This is a private stream, as indicated by the lock icon next to the stream name.\")\n + \" \"\n + _(\"Private streams are only visible to stream members.\")\n + \"\\n\"\n \"\\n\"\n + _(\n \"To manage this stream, go to [Stream settings]({stream_settings_url}) \"\n \"and click on `{initial_private_stream_name}`.\"\n )\n ).format(\n stream_settings_url=\"#streams/subscribed\",\n initial_private_stream_name=Realm.INITIAL_PRIVATE_STREAM_NAME,\n )\n\n content1_of_topic_demonstration_topic = (\n _(\n \"This is a message on stream #**{default_notification_stream_name}** with the \"\n \"topic `topic demonstration`.\"\n )\n ).format(default_notification_stream_name=Realm.DEFAULT_NOTIFICATION_STREAM_NAME)\n\n content2_of_topic_demonstration_topic = (\n _(\"Topics are a lightweight tool to keep conversations organized.\")\n + \" \"\n + _(\"You can learn more about topics at [Streams and topics]({about_topics_help_url}).\")\n ).format(about_topics_help_url=\"/help/streams-and-topics\")\n\n content_of_swimming_turtles_topic = (\n _(\n \"This is a message on stream #**{default_notification_stream_name}** with the \"\n \"topic `swimming turtles`.\"\n )\n + \"\\n\"\n \"\\n\"\n \"[](/static/images/cute/turtle.png)\"\n \"\\n\"\n \"\\n\"\n + _(\n \"[Start a new topic]({start_topic_help_url}) any time you're not replying to a \\\n previous message.\"\n )\n ).format(\n default_notification_stream_name=Realm.DEFAULT_NOTIFICATION_STREAM_NAME,\n start_topic_help_url=\"/help/start-a-new-topic\",\n )\n\n welcome_messages: List[Dict[str, str]] = [\n {\n \"stream\": Realm.INITIAL_PRIVATE_STREAM_NAME,\n \"topic\": \"private streams\",\n \"content\": content_of_private_streams_topic,\n },\n {\n \"stream\": Realm.DEFAULT_NOTIFICATION_STREAM_NAME,\n \"topic\": \"topic demonstration\",\n \"content\": content1_of_topic_demonstration_topic,\n },\n {\n \"stream\": 
Realm.DEFAULT_NOTIFICATION_STREAM_NAME,\n \"topic\": \"topic demonstration\",\n \"content\": content2_of_topic_demonstration_topic,\n },\n {\n \"stream\": realm.DEFAULT_NOTIFICATION_STREAM_NAME,\n \"topic\": \"swimming turtles\",\n \"content\": content_of_swimming_turtles_topic,\n },\n ]\n\n messages = [\n internal_prep_stream_message_by_name(\n realm,\n welcome_bot,\n message[\"stream\"],\n message[\"topic\"],\n message[\"content\"],\n )\n for message in welcome_messages\n ]\n message_ids = do_send_messages(messages)\n\n # We find the one of our just-sent messages with turtle.png in it,\n # and react to it. This is a bit hacky, but works and is kinda a\n # 1-off thing.\n turtle_message = Message.objects.select_for_update().get(\n id__in=message_ids, content__icontains=\"cute/turtle.png\"\n )\n (emoji_code, reaction_type) = emoji_name_to_emoji_code(realm, \"turtle\")\n do_add_reaction(welcome_bot, turtle_message, \"turtle\", emoji_code, reaction_type)\n", "path": "zerver/lib/onboarding.py"}], "after_files": [{"content": "from typing import Dict, List\n\nfrom django.conf import settings\nfrom django.db import transaction\nfrom django.db.models import Count\nfrom django.utils.translation import gettext as _\nfrom django.utils.translation import override as override_language\n\nfrom zerver.actions.create_realm import setup_realm_internal_bots\nfrom zerver.actions.message_send import (\n do_send_messages,\n internal_prep_stream_message_by_name,\n internal_send_private_message,\n)\nfrom zerver.actions.reactions import do_add_reaction\nfrom zerver.lib.emoji import emoji_name_to_emoji_code\nfrom zerver.lib.message import SendMessageRequest\nfrom zerver.models import Message, Realm, UserProfile, get_system_bot\n\n\ndef missing_any_realm_internal_bots() -> bool:\n bot_emails = [\n bot[\"email_template\"] % (settings.INTERNAL_BOT_DOMAIN,)\n for bot in settings.REALM_INTERNAL_BOTS\n ]\n bot_counts = {\n email: count\n for email, count in UserProfile.objects.filter(email__in=bot_emails)\n .values_list(\"email\")\n .annotate(Count(\"id\"))\n }\n realm_count = Realm.objects.count()\n return any(bot_counts.get(email, 0) < realm_count for email in bot_emails)\n\n\ndef create_if_missing_realm_internal_bots() -> None:\n \"\"\"This checks if there is any realm internal bot missing.\n\n If that is the case, it creates the missing realm internal bots.\n \"\"\"\n if missing_any_realm_internal_bots():\n for realm in Realm.objects.all():\n setup_realm_internal_bots(realm)\n\n\ndef send_initial_pms(user: UserProfile) -> None:\n organization_setup_text = \"\"\n\n # We need to override the language in this code path, because it's\n # called from account registration, which is a pre-account API\n # request and thus may not have the user's language context yet.\n with override_language(user.default_language):\n if user.is_realm_admin:\n help_url = user.realm.uri + \"/help/getting-your-organization-started-with-zulip\"\n organization_setup_text = (\n \" \" + _(\"We also have a guide for [Setting up your organization]({help_url}).\")\n ).format(help_url=help_url)\n\n welcome_msg = _(\"Hello, and welcome to Zulip!\") + \"\ud83d\udc4b\"\n demo_org_warning = \"\"\n if user.realm.demo_organization_scheduled_deletion_date is not None:\n demo_org_warning = (\n _(\n \"Note that this is a [demo organization]({demo_org_help_url}) and will be \"\n \"**automatically deleted** in 30 days.\"\n )\n + \"\\n\\n\"\n )\n\n content = \"\".join(\n [\n welcome_msg + \" \",\n _(\"This is a private message from me, Welcome Bot.\") + 
\"\\n\\n\",\n _(\n \"If you are new to Zulip, check out our [Getting started guide]({getting_started_url})!\"\n ),\n \"{organization_setup_text}\" + \"\\n\\n\",\n \"{demo_org_warning}\",\n _(\n \"I can also help you get set up! Just click anywhere on this message or press `r` to reply.\"\n )\n + \"\\n\\n\",\n _(\"Here are a few messages I understand:\") + \" \",\n bot_commands(),\n ]\n )\n\n content = content.format(\n organization_setup_text=organization_setup_text,\n demo_org_warning=demo_org_warning,\n demo_org_help_url=\"/help/demo-organizations\",\n getting_started_url=\"/help/getting-started-with-zulip\",\n )\n\n internal_send_private_message(\n get_system_bot(settings.WELCOME_BOT, user.realm_id), user, content\n )\n\n\ndef bot_commands(no_help_command: bool = False) -> str:\n commands = [\n \"apps\",\n \"profile\",\n \"theme\",\n \"streams\",\n \"topics\",\n \"message formatting\",\n \"keyboard shortcuts\",\n ]\n if not no_help_command:\n commands.append(\"help\")\n return \", \".join([\"`\" + command + \"`\" for command in commands]) + \".\"\n\n\ndef select_welcome_bot_response(human_response_lower: str) -> str:\n # Given the raw (pre-markdown-rendering) content for a private\n # message from the user to Welcome Bot, select the appropriate reply.\n if human_response_lower in [\"app\", \"apps\"]:\n return _(\n \"You can [download](/apps) the [mobile and desktop apps](/apps). \"\n \"Zulip also works great in a browser.\"\n )\n elif human_response_lower == \"profile\":\n return _(\n \"Go to [Profile settings](#settings/profile) \"\n \"to add a [profile picture](/help/change-your-profile-picture) \"\n \"and edit your [profile information](/help/edit-your-profile).\"\n )\n elif human_response_lower == \"theme\":\n return _(\n \"Go to [Display settings](#settings/display-settings) \"\n \"to [switch between the light and dark themes](/help/dark-theme), \"\n \"[pick your favorite emoji theme](/help/emoji-and-emoticons#change-your-emoji-set), \"\n \"[change your language](/help/change-your-language), \"\n \"and make other tweaks to your Zulip experience.\"\n )\n elif human_response_lower in [\"stream\", \"streams\", \"channel\", \"channels\"]:\n return \"\".join(\n [\n _(\n \"In Zulip, streams [determine who gets a message](/help/streams-and-topics). \"\n \"They are similar to channels in other chat apps.\"\n )\n + \"\\n\\n\",\n _(\"[Browse and subscribe to streams](#streams/all).\"),\n ]\n )\n elif human_response_lower in [\"topic\", \"topics\"]:\n return \"\".join(\n [\n _(\n \"In Zulip, topics [tell you what a message is about](/help/streams-and-topics). \"\n \"They are light-weight subjects, very similar to the subject line of an email.\"\n )\n + \"\\n\\n\",\n _(\n \"Check out [Recent conversations](#recent) to see what's happening! \"\n 'You can return to this conversation by clicking \"Private messages\" in the upper left.'\n ),\n ]\n )\n elif human_response_lower in [\"keyboard\", \"shortcuts\", \"keyboard shortcuts\"]:\n return \"\".join(\n [\n _(\n \"Zulip's [keyboard shortcuts](#keyboard-shortcuts) \"\n \"let you navigate the app quickly and efficiently.\"\n )\n + \"\\n\\n\",\n _(\"Press `?` any time to see a [cheat sheet](#keyboard-shortcuts).\"),\n ]\n )\n elif human_response_lower in [\"formatting\", \"message formatting\"]:\n return \"\".join(\n [\n _(\n \"Zulip uses [Markdown](/help/format-your-message-using-markdown), \"\n \"an intuitive format for **bold**, *italics*, bulleted lists, and more. 
\"\n \"Click [here](#message-formatting) for a cheat sheet.\"\n )\n + \"\\n\\n\",\n _(\n \"Check out our [messaging tips](/help/messaging-tips) \"\n \"to learn about emoji reactions, code blocks and much more!\"\n ),\n ]\n )\n elif human_response_lower in [\"help\", \"?\"]:\n return \"\".join(\n [\n _(\"Here are a few messages I understand:\") + \" \",\n bot_commands(no_help_command=True) + \"\\n\\n\",\n _(\n \"Check out our [Getting started guide](/help/getting-started-with-zulip), \"\n \"or browse the [Help center](/help/) to learn more!\"\n ),\n ]\n )\n else:\n return \"\".join(\n [\n _(\n \"I\u2019m sorry, I did not understand your message. Please try one of the following commands:\"\n )\n + \" \",\n bot_commands(),\n ]\n )\n\n\ndef send_welcome_bot_response(send_request: SendMessageRequest) -> None:\n \"\"\"Given the send_request object for a private message from the user\n to welcome-bot, trigger the welcome-bot reply.\"\"\"\n welcome_bot = get_system_bot(settings.WELCOME_BOT, send_request.message.sender.realm_id)\n human_response_lower = send_request.message.content.lower()\n content = select_welcome_bot_response(human_response_lower)\n internal_send_private_message(welcome_bot, send_request.message.sender, content)\n\n\[email protected]\ndef send_initial_realm_messages(realm: Realm) -> None:\n welcome_bot = get_system_bot(settings.WELCOME_BOT, realm.id)\n # Make sure each stream created in the realm creation process has at least one message below\n # Order corresponds to the ordering of the streams on the left sidebar, to make the initial Home\n # view slightly less overwhelming\n content_of_private_streams_topic = (\n _(\"This is a private stream, as indicated by the lock icon next to the stream name.\")\n + \" \"\n + _(\"Private streams are only visible to stream members.\")\n + \"\\n\"\n \"\\n\"\n + _(\n \"To manage this stream, go to [Stream settings]({stream_settings_url}) \"\n \"and click on `{initial_private_stream_name}`.\"\n )\n ).format(\n stream_settings_url=\"#streams/subscribed\",\n initial_private_stream_name=Realm.INITIAL_PRIVATE_STREAM_NAME,\n )\n\n content1_of_topic_demonstration_topic = (\n _(\n \"This is a message on stream #**{default_notification_stream_name}** with the \"\n \"topic `topic demonstration`.\"\n )\n ).format(default_notification_stream_name=Realm.DEFAULT_NOTIFICATION_STREAM_NAME)\n\n content2_of_topic_demonstration_topic = (\n _(\"Topics are a lightweight tool to keep conversations organized.\")\n + \" \"\n + _(\"You can learn more about topics at [Streams and topics]({about_topics_help_url}).\")\n ).format(about_topics_help_url=\"/help/streams-and-topics\")\n\n content_of_swimming_turtles_topic = (\n _(\n \"This is a message on stream #**{default_notification_stream_name}** with the \"\n \"topic `swimming turtles`.\"\n )\n + \"\\n\"\n \"\\n\"\n \"[](/static/images/cute/turtle.png)\"\n \"\\n\"\n \"\\n\"\n + _(\n \"[Start a new topic]({start_topic_help_url}) any time you're not replying to a \\\n previous message.\"\n )\n ).format(\n default_notification_stream_name=Realm.DEFAULT_NOTIFICATION_STREAM_NAME,\n start_topic_help_url=\"/help/start-a-new-topic\",\n )\n\n welcome_messages: List[Dict[str, str]] = [\n {\n \"stream\": Realm.INITIAL_PRIVATE_STREAM_NAME,\n \"topic\": \"private streams\",\n \"content\": content_of_private_streams_topic,\n },\n {\n \"stream\": Realm.DEFAULT_NOTIFICATION_STREAM_NAME,\n \"topic\": \"topic demonstration\",\n \"content\": content1_of_topic_demonstration_topic,\n },\n {\n \"stream\": 
Realm.DEFAULT_NOTIFICATION_STREAM_NAME,\n \"topic\": \"topic demonstration\",\n \"content\": content2_of_topic_demonstration_topic,\n },\n {\n \"stream\": realm.DEFAULT_NOTIFICATION_STREAM_NAME,\n \"topic\": \"swimming turtles\",\n \"content\": content_of_swimming_turtles_topic,\n },\n ]\n\n messages = [\n internal_prep_stream_message_by_name(\n realm,\n welcome_bot,\n message[\"stream\"],\n message[\"topic\"],\n message[\"content\"],\n )\n for message in welcome_messages\n ]\n message_ids = do_send_messages(messages)\n\n # We find the one of our just-sent messages with turtle.png in it,\n # and react to it. This is a bit hacky, but works and is kinda a\n # 1-off thing.\n turtle_message = Message.objects.select_for_update().get(\n id__in=message_ids, content__icontains=\"cute/turtle.png\"\n )\n (emoji_code, reaction_type) = emoji_name_to_emoji_code(realm, \"turtle\")\n do_add_reaction(welcome_bot, turtle_message, \"turtle\", emoji_code, reaction_type)\n", "path": "zerver/lib/onboarding.py"}]} | 3,745 | 123 |
gh_patches_debug_27161 | rasdani/github-patches | git_diff | ytdl-org__youtube-dl-18228 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add support for nzz.ch
rudolffischer@BueroPC-RF:~$ youtube-dl "http://www.nzz.ch/panorama/30-jahre-herzschmerz-aus-saas-fee-1.18438209" -v
[debug] System config: []
[debug] User config: []
[debug] Command-line args: ['http://www.nzz.ch/panorama/30-jahre-herzschmerz-aus-saas-fee-1.18438209', '-v']
[debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2014.12.06.1
[debug] Python version 2.7.6 - Linux-3.13.0-39-generic-x86_64-with-Ubuntu-14.04-trusty
[debug] exe versions: rtmpdump 2.4
[debug] Proxy map: {}
[generic] 30-jahre-herzschmerz-aus-saas-fee-1: Requesting header
WARNING: Falling back on generic information extractor.
[generic] 30-jahre-herzschmerz-aus-saas-fee-1: Downloading webpage
[generic] 30-jahre-herzschmerz-aus-saas-fee-1: Extracting information
ERROR: Unsupported URL: http://www.nzz.ch/panorama/30-jahre-herzschmerz-aus-saas-fee-1.18438209; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
Traceback (most recent call last):
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/generic.py", line 651, in _real_extract
doc = parse_xml(webpage)
File "/usr/local/bin/youtube-dl/youtube_dl/utils.py", line 1425, in parse_xml
tree = xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1300, in XML
parser.feed(text)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1642, in feed
self._raiseerror(v)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1506, in _raiseerror
raise err
ParseError: not well-formed (invalid token): line 2, column 42
Traceback (most recent call last):
File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 553, in extract_info
ie_result = ie.extract(url)
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py", line 241, in extract
return self._real_extract(url)
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/generic.py", line 1044, in _real_extract
raise ExtractorError('Unsupported URL: %s' % url)
ExtractorError: Unsupported URL: http://www.nzz.ch/panorama/30-jahre-herzschmerz-aus-saas-fee-1.18438209; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
rudolffischer@BueroPC-RF:~$
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `youtube_dl/extractor/nzz.py`
Content:
```
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import re
5
6 from .common import InfoExtractor
7 from ..utils import (
8 extract_attributes,
9 )
10
11
12 class NZZIE(InfoExtractor):
13 _VALID_URL = r'https?://(?:www\.)?nzz\.ch/(?:[^/]+/)*[^/?#]+-ld\.(?P<id>\d+)'
14 _TEST = {
15 'url': 'http://www.nzz.ch/zuerich/gymizyte/gymizyte-schreiben-schueler-heute-noch-diktate-ld.9153',
16 'info_dict': {
17 'id': '9153',
18 },
19 'playlist_mincount': 6,
20 }
21
22 def _real_extract(self, url):
23 page_id = self._match_id(url)
24 webpage = self._download_webpage(url, page_id)
25
26 entries = []
27 for player_element in re.findall(r'(<[^>]+class="kalturaPlayer"[^>]*>)', webpage):
28 player_params = extract_attributes(player_element)
29 if player_params.get('data-type') not in ('kaltura_singleArticle',):
30 self.report_warning('Unsupported player type')
31 continue
32 entry_id = player_params['data-id']
33 entries.append(self.url_result(
34 'kaltura:1750922:' + entry_id, 'Kaltura', entry_id))
35
36 return self.playlist_result(entries, page_id)
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/youtube_dl/extractor/nzz.py b/youtube_dl/extractor/nzz.py
--- a/youtube_dl/extractor/nzz.py
+++ b/youtube_dl/extractor/nzz.py
@@ -11,20 +11,27 @@
class NZZIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?nzz\.ch/(?:[^/]+/)*[^/?#]+-ld\.(?P<id>\d+)'
- _TEST = {
+ _TESTS = [{
'url': 'http://www.nzz.ch/zuerich/gymizyte/gymizyte-schreiben-schueler-heute-noch-diktate-ld.9153',
'info_dict': {
'id': '9153',
},
'playlist_mincount': 6,
- }
+ }, {
+ 'url': 'https://www.nzz.ch/video/nzz-standpunkte/cvp-auf-der-suche-nach-dem-mass-der-mitte-ld.1368112',
+ 'info_dict': {
+ 'id': '1368112',
+ },
+ 'playlist_count': 1,
+ }]
def _real_extract(self, url):
page_id = self._match_id(url)
webpage = self._download_webpage(url, page_id)
entries = []
- for player_element in re.findall(r'(<[^>]+class="kalturaPlayer"[^>]*>)', webpage):
+ for player_element in re.findall(
+ r'(<[^>]+class="kalturaPlayer[^"]*"[^>]*>)', webpage):
player_params = extract_attributes(player_element)
if player_params.get('data-type') not in ('kaltura_singleArticle',):
self.report_warning('Unsupported player type')
| {"golden_diff": "diff --git a/youtube_dl/extractor/nzz.py b/youtube_dl/extractor/nzz.py\n--- a/youtube_dl/extractor/nzz.py\n+++ b/youtube_dl/extractor/nzz.py\n@@ -11,20 +11,27 @@\n \n class NZZIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?nzz\\.ch/(?:[^/]+/)*[^/?#]+-ld\\.(?P<id>\\d+)'\n- _TEST = {\n+ _TESTS = [{\n 'url': 'http://www.nzz.ch/zuerich/gymizyte/gymizyte-schreiben-schueler-heute-noch-diktate-ld.9153',\n 'info_dict': {\n 'id': '9153',\n },\n 'playlist_mincount': 6,\n- }\n+ }, {\n+ 'url': 'https://www.nzz.ch/video/nzz-standpunkte/cvp-auf-der-suche-nach-dem-mass-der-mitte-ld.1368112',\n+ 'info_dict': {\n+ 'id': '1368112',\n+ },\n+ 'playlist_count': 1,\n+ }]\n \n def _real_extract(self, url):\n page_id = self._match_id(url)\n webpage = self._download_webpage(url, page_id)\n \n entries = []\n- for player_element in re.findall(r'(<[^>]+class=\"kalturaPlayer\"[^>]*>)', webpage):\n+ for player_element in re.findall(\n+ r'(<[^>]+class=\"kalturaPlayer[^\"]*\"[^>]*>)', webpage):\n player_params = extract_attributes(player_element)\n if player_params.get('data-type') not in ('kaltura_singleArticle',):\n self.report_warning('Unsupported player type')\n", "issue": "Add support for nzz.ch\nrudolffischer@BueroPC-RF:~$ youtube-dl \"http://www.nzz.ch/panorama/30-jahre-herzschmerz-aus-saas-fee-1.18438209\" -v\n[debug] System config: []\n[debug] User config: []\n[debug] Command-line args: ['http://www.nzz.ch/panorama/30-jahre-herzschmerz-aus-saas-fee-1.18438209', '-v']\n[debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8\n[debug] youtube-dl version 2014.12.06.1\n[debug] Python version 2.7.6 - Linux-3.13.0-39-generic-x86_64-with-Ubuntu-14.04-trusty\n[debug] exe versions: rtmpdump 2.4\n[debug] Proxy map: {}\n[generic] 30-jahre-herzschmerz-aus-saas-fee-1: Requesting header\nWARNING: Falling back on generic information extractor.\n[generic] 30-jahre-herzschmerz-aus-saas-fee-1: Downloading webpage\n[generic] 30-jahre-herzschmerz-aus-saas-fee-1: Extracting information\nERROR: Unsupported URL: http://www.nzz.ch/panorama/30-jahre-herzschmerz-aus-saas-fee-1.18438209; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. 
Be sure to call youtube-dl with the --verbose flag and include its complete output.\nTraceback (most recent call last):\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/generic.py\", line 651, in _real_extract\n doc = parse_xml(webpage)\n File \"/usr/local/bin/youtube-dl/youtube_dl/utils.py\", line 1425, in parse_xml\n tree = xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs)\n File \"/usr/lib/python2.7/xml/etree/ElementTree.py\", line 1300, in XML\n parser.feed(text)\n File \"/usr/lib/python2.7/xml/etree/ElementTree.py\", line 1642, in feed\n self._raiseerror(v)\n File \"/usr/lib/python2.7/xml/etree/ElementTree.py\", line 1506, in _raiseerror\n raise err\nParseError: not well-formed (invalid token): line 2, column 42\nTraceback (most recent call last):\n File \"/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py\", line 553, in extract_info\n ie_result = ie.extract(url)\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py\", line 241, in extract\n return self._real_extract(url)\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/generic.py\", line 1044, in _real_extract\n raise ExtractorError('Unsupported URL: %s' % url)\nExtractorError: Unsupported URL: http://www.nzz.ch/panorama/30-jahre-herzschmerz-aus-saas-fee-1.18438209; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.\n\nrudolffischer@BueroPC-RF:~$ \n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n extract_attributes,\n)\n\n\nclass NZZIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?nzz\\.ch/(?:[^/]+/)*[^/?#]+-ld\\.(?P<id>\\d+)'\n _TEST = {\n 'url': 'http://www.nzz.ch/zuerich/gymizyte/gymizyte-schreiben-schueler-heute-noch-diktate-ld.9153',\n 'info_dict': {\n 'id': '9153',\n },\n 'playlist_mincount': 6,\n }\n\n def _real_extract(self, url):\n page_id = self._match_id(url)\n webpage = self._download_webpage(url, page_id)\n\n entries = []\n for player_element in re.findall(r'(<[^>]+class=\"kalturaPlayer\"[^>]*>)', webpage):\n player_params = extract_attributes(player_element)\n if player_params.get('data-type') not in ('kaltura_singleArticle',):\n self.report_warning('Unsupported player type')\n continue\n entry_id = player_params['data-id']\n entries.append(self.url_result(\n 'kaltura:1750922:' + entry_id, 'Kaltura', entry_id))\n\n return self.playlist_result(entries, page_id)\n", "path": "youtube_dl/extractor/nzz.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n extract_attributes,\n)\n\n\nclass NZZIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?nzz\\.ch/(?:[^/]+/)*[^/?#]+-ld\\.(?P<id>\\d+)'\n _TESTS = [{\n 'url': 'http://www.nzz.ch/zuerich/gymizyte/gymizyte-schreiben-schueler-heute-noch-diktate-ld.9153',\n 'info_dict': {\n 'id': '9153',\n },\n 'playlist_mincount': 6,\n }, {\n 'url': 'https://www.nzz.ch/video/nzz-standpunkte/cvp-auf-der-suche-nach-dem-mass-der-mitte-ld.1368112',\n 'info_dict': {\n 'id': '1368112',\n },\n 'playlist_count': 1,\n }]\n\n def _real_extract(self, url):\n page_id = self._match_id(url)\n webpage = self._download_webpage(url, page_id)\n\n entries = []\n for player_element in re.findall(\n r'(<[^>]+class=\"kalturaPlayer[^\"]*\"[^>]*>)', webpage):\n player_params = 
extract_attributes(player_element)\n if player_params.get('data-type') not in ('kaltura_singleArticle',):\n self.report_warning('Unsupported player type')\n continue\n entry_id = player_params['data-id']\n entries.append(self.url_result(\n 'kaltura:1750922:' + entry_id, 'Kaltura', entry_id))\n\n return self.playlist_result(entries, page_id)\n", "path": "youtube_dl/extractor/nzz.py"}]} | 1,537 | 420 |
gh_patches_debug_25079 | rasdani/github-patches | git_diff | Kinto__kinto-630 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Enabling the flush endpoint through env vars does not seem to work
I'm running Kinto + postgres with docker-compose (using the example docker-compose.yml in the repo).
Adding `KINTO_FLUSH_ENDPOINT_ENABLED` to the environment section in docker-compose.yml does not enable the flush endpoint for me. I instead had to add `kinto.flush_endpoint_enabled = true` to a custom ini file, that worked.
Can the flush endpoint be enabled through an env var like this?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/__init__.py`
Content:
```
1 import pkg_resources
2 import logging
3
4 import kinto.core
5 from pyramid.config import Configurator
6 from pyramid.settings import asbool
7 from pyramid.security import Authenticated
8
9 from kinto.authorization import RouteFactory
10
11 # Module version, as defined in PEP-0396.
12 __version__ = pkg_resources.get_distribution(__package__).version
13
14 # Implemented HTTP API Version
15 HTTP_API_VERSION = '1.5'
16
17 # Main kinto logger
18 logger = logging.getLogger(__name__)
19
20
21 DEFAULT_SETTINGS = {
22 'retry_after_seconds': 3,
23 'cache_backend': 'kinto.core.cache.memory',
24 'permission_backend': 'kinto.core.permission.memory',
25 'storage_backend': 'kinto.core.storage.memory',
26 'project_docs': 'https://kinto.readthedocs.io/',
27 'bucket_create_principals': Authenticated,
28 'multiauth.authorization_policy': (
29 'kinto.authorization.AuthorizationPolicy'),
30 'experimental_collection_schema_validation': 'False',
31 'http_api_version': HTTP_API_VERSION
32 }
33
34
35 def main(global_config, config=None, **settings):
36 if not config:
37 config = Configurator(settings=settings, root_factory=RouteFactory)
38
39 # Force project name, since it determines settings prefix.
40 config.add_settings({'kinto.project_name': 'kinto'})
41
42 kinto.core.initialize(config,
43 version=__version__,
44 default_settings=DEFAULT_SETTINGS)
45
46 settings = config.get_settings()
47
48 # Expose capability
49 schema_enabled = asbool(
50 settings['experimental_collection_schema_validation']
51 )
52 if schema_enabled:
53 config.add_api_capability(
54 "schema",
55 description="Validates collection records with JSON schemas.",
56 url="http://kinto.readthedocs.io/en/latest/api/1.x/"
57 "collections.html#collection-json-schema")
58
59 # Scan Kinto views.
60 kwargs = {}
61 flush_enabled = asbool(settings.get('flush_endpoint_enabled'))
62
63 if flush_enabled:
64 config.add_api_capability(
65 "flush_endpoint",
66 description="The __flush__ endpoint can be used to remove all "
67 "data from all backends.",
68 url="http://kinto.readthedocs.io/en/latest/configuration/"
69 "settings.html#activating-the-flush-endpoint"
70 )
71 else:
72 kwargs['ignore'] = 'kinto.views.flush'
73 config.scan("kinto.views", **kwargs)
74
75 app = config.make_wsgi_app()
76
77 # Install middleware (idempotent if disabled)
78 return kinto.core.install_middlewares(app, settings)
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/__init__.py b/kinto/__init__.py
--- a/kinto/__init__.py
+++ b/kinto/__init__.py
@@ -19,6 +19,7 @@
DEFAULT_SETTINGS = {
+ 'flush_endpoint_enabled': False,
'retry_after_seconds': 3,
'cache_backend': 'kinto.core.cache.memory',
'permission_backend': 'kinto.core.permission.memory',
@@ -58,18 +59,18 @@
# Scan Kinto views.
kwargs = {}
- flush_enabled = asbool(settings.get('flush_endpoint_enabled'))
+ flush_enabled = asbool(settings.get('flush_endpoint_enabled'))
if flush_enabled:
config.add_api_capability(
"flush_endpoint",
description="The __flush__ endpoint can be used to remove all "
"data from all backends.",
url="http://kinto.readthedocs.io/en/latest/configuration/"
- "settings.html#activating-the-flush-endpoint"
- )
+ "settings.html#activating-the-flush-endpoint")
else:
kwargs['ignore'] = 'kinto.views.flush'
+
config.scan("kinto.views", **kwargs)
app = config.make_wsgi_app()
| {"golden_diff": "diff --git a/kinto/__init__.py b/kinto/__init__.py\n--- a/kinto/__init__.py\n+++ b/kinto/__init__.py\n@@ -19,6 +19,7 @@\n \n \n DEFAULT_SETTINGS = {\n+ 'flush_endpoint_enabled': False,\n 'retry_after_seconds': 3,\n 'cache_backend': 'kinto.core.cache.memory',\n 'permission_backend': 'kinto.core.permission.memory',\n@@ -58,18 +59,18 @@\n \n # Scan Kinto views.\n kwargs = {}\n- flush_enabled = asbool(settings.get('flush_endpoint_enabled'))\n \n+ flush_enabled = asbool(settings.get('flush_endpoint_enabled'))\n if flush_enabled:\n config.add_api_capability(\n \"flush_endpoint\",\n description=\"The __flush__ endpoint can be used to remove all \"\n \"data from all backends.\",\n url=\"http://kinto.readthedocs.io/en/latest/configuration/\"\n- \"settings.html#activating-the-flush-endpoint\"\n- )\n+ \"settings.html#activating-the-flush-endpoint\")\n else:\n kwargs['ignore'] = 'kinto.views.flush'\n+\n config.scan(\"kinto.views\", **kwargs)\n \n app = config.make_wsgi_app()\n", "issue": "Enabling the flush endpoint through env vars does not seem to work\nI'm running Kinto + postgres with docker-compose (using the example docker-compose.yml in the repo). \n\nAdding `KINTO_FLUSH_ENDPOINT_ENABLED` to the environment section in docker-compose.yml does not enable the flush endpoint for me. I instead had to add `kinto.flush_endpoint_enabled = true` to a custom ini file, that worked.\n\nCan the flush endpoint be enabled through an env var like this?\n\n", "before_files": [{"content": "import pkg_resources\nimport logging\n\nimport kinto.core\nfrom pyramid.config import Configurator\nfrom pyramid.settings import asbool\nfrom pyramid.security import Authenticated\n\nfrom kinto.authorization import RouteFactory\n\n# Module version, as defined in PEP-0396.\n__version__ = pkg_resources.get_distribution(__package__).version\n\n# Implemented HTTP API Version\nHTTP_API_VERSION = '1.5'\n\n# Main kinto logger\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_SETTINGS = {\n 'retry_after_seconds': 3,\n 'cache_backend': 'kinto.core.cache.memory',\n 'permission_backend': 'kinto.core.permission.memory',\n 'storage_backend': 'kinto.core.storage.memory',\n 'project_docs': 'https://kinto.readthedocs.io/',\n 'bucket_create_principals': Authenticated,\n 'multiauth.authorization_policy': (\n 'kinto.authorization.AuthorizationPolicy'),\n 'experimental_collection_schema_validation': 'False',\n 'http_api_version': HTTP_API_VERSION\n}\n\n\ndef main(global_config, config=None, **settings):\n if not config:\n config = Configurator(settings=settings, root_factory=RouteFactory)\n\n # Force project name, since it determines settings prefix.\n config.add_settings({'kinto.project_name': 'kinto'})\n\n kinto.core.initialize(config,\n version=__version__,\n default_settings=DEFAULT_SETTINGS)\n\n settings = config.get_settings()\n\n # Expose capability\n schema_enabled = asbool(\n settings['experimental_collection_schema_validation']\n )\n if schema_enabled:\n config.add_api_capability(\n \"schema\",\n description=\"Validates collection records with JSON schemas.\",\n url=\"http://kinto.readthedocs.io/en/latest/api/1.x/\"\n \"collections.html#collection-json-schema\")\n\n # Scan Kinto views.\n kwargs = {}\n flush_enabled = asbool(settings.get('flush_endpoint_enabled'))\n\n if flush_enabled:\n config.add_api_capability(\n \"flush_endpoint\",\n description=\"The __flush__ endpoint can be used to remove all \"\n \"data from all backends.\",\n url=\"http://kinto.readthedocs.io/en/latest/configuration/\"\n 
\"settings.html#activating-the-flush-endpoint\"\n )\n else:\n kwargs['ignore'] = 'kinto.views.flush'\n config.scan(\"kinto.views\", **kwargs)\n\n app = config.make_wsgi_app()\n\n # Install middleware (idempotent if disabled)\n return kinto.core.install_middlewares(app, settings)\n", "path": "kinto/__init__.py"}], "after_files": [{"content": "import pkg_resources\nimport logging\n\nimport kinto.core\nfrom pyramid.config import Configurator\nfrom pyramid.settings import asbool\nfrom pyramid.security import Authenticated\n\nfrom kinto.authorization import RouteFactory\n\n# Module version, as defined in PEP-0396.\n__version__ = pkg_resources.get_distribution(__package__).version\n\n# Implemented HTTP API Version\nHTTP_API_VERSION = '1.5'\n\n# Main kinto logger\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_SETTINGS = {\n 'flush_endpoint_enabled': False,\n 'retry_after_seconds': 3,\n 'cache_backend': 'kinto.core.cache.memory',\n 'permission_backend': 'kinto.core.permission.memory',\n 'storage_backend': 'kinto.core.storage.memory',\n 'project_docs': 'https://kinto.readthedocs.io/',\n 'bucket_create_principals': Authenticated,\n 'multiauth.authorization_policy': (\n 'kinto.authorization.AuthorizationPolicy'),\n 'experimental_collection_schema_validation': 'False',\n 'http_api_version': HTTP_API_VERSION\n}\n\n\ndef main(global_config, config=None, **settings):\n if not config:\n config = Configurator(settings=settings, root_factory=RouteFactory)\n\n # Force project name, since it determines settings prefix.\n config.add_settings({'kinto.project_name': 'kinto'})\n\n kinto.core.initialize(config,\n version=__version__,\n default_settings=DEFAULT_SETTINGS)\n\n settings = config.get_settings()\n\n # Expose capability\n schema_enabled = asbool(\n settings['experimental_collection_schema_validation']\n )\n if schema_enabled:\n config.add_api_capability(\n \"schema\",\n description=\"Validates collection records with JSON schemas.\",\n url=\"http://kinto.readthedocs.io/en/latest/api/1.x/\"\n \"collections.html#collection-json-schema\")\n\n # Scan Kinto views.\n kwargs = {}\n\n flush_enabled = asbool(settings.get('flush_endpoint_enabled'))\n if flush_enabled:\n config.add_api_capability(\n \"flush_endpoint\",\n description=\"The __flush__ endpoint can be used to remove all \"\n \"data from all backends.\",\n url=\"http://kinto.readthedocs.io/en/latest/configuration/\"\n \"settings.html#activating-the-flush-endpoint\")\n else:\n kwargs['ignore'] = 'kinto.views.flush'\n\n config.scan(\"kinto.views\", **kwargs)\n\n app = config.make_wsgi_app()\n\n # Install middleware (idempotent if disabled)\n return kinto.core.install_middlewares(app, settings)\n", "path": "kinto/__init__.py"}]} | 1,043 | 272 |
gh_patches_debug_42551 | rasdani/github-patches | git_diff | feast-dev__feast-2348 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Docs] Explain DynamoDB online_write_batch uses a batch_writer
## Expected Behavior
`DynamoDBOnlineStore` method `online_write_batch` uses a [BatchWriter](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/dynamodb.html#batch-writing) under the hood.
User documentation should explain the advantages of using it, including
> automatically handle buffering and sending items in batches. In addition, the batch writer will also automatically handle any unprocessed items and resend them as needed.
--- END ISSUE ---
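For reference, the `BatchWriter` mentioned above is obtained from a boto3 `Table` object and used as a context manager: it buffers `put_item` calls into `BatchWriteItem` requests (up to 25 items each) and automatically resends any unprocessed items. A minimal sketch follows; the table name and region are assumptions for illustration only, not taken from the repository.

```python
import boto3

# Assumed table name and region, purely for illustration.
dynamodb = boto3.resource("dynamodb", region_name="us-west-2")
table = dynamodb.Table("demo-table")

# batch_writer() buffers the puts, sends them in batches, and retries
# any unprocessed items returned by DynamoDB.
with table.batch_writer() as batch:
    for i in range(100):
        batch.put_item(Item={"entity_id": f"entity-{i}", "value": i})
```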
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/feast/infra/online_stores/dynamodb.py`
Content:
```
1 # Copyright 2021 The Feast Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import logging
15 from datetime import datetime
16 from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple
17
18 from pydantic import StrictStr
19 from pydantic.typing import Literal
20
21 from feast import Entity, FeatureView, utils
22 from feast.infra.infra_object import DYNAMODB_INFRA_OBJECT_CLASS_TYPE, InfraObject
23 from feast.infra.online_stores.helpers import compute_entity_id
24 from feast.infra.online_stores.online_store import OnlineStore
25 from feast.protos.feast.core.DynamoDBTable_pb2 import (
26 DynamoDBTable as DynamoDBTableProto,
27 )
28 from feast.protos.feast.core.InfraObject_pb2 import InfraObject as InfraObjectProto
29 from feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto
30 from feast.protos.feast.types.Value_pb2 import Value as ValueProto
31 from feast.repo_config import FeastConfigBaseModel, RepoConfig
32 from feast.usage import log_exceptions_and_usage, tracing_span
33
34 try:
35 import boto3
36 from botocore.exceptions import ClientError
37 except ImportError as e:
38 from feast.errors import FeastExtrasDependencyImportError
39
40 raise FeastExtrasDependencyImportError("aws", str(e))
41
42
43 logger = logging.getLogger(__name__)
44
45
46 class DynamoDBOnlineStoreConfig(FeastConfigBaseModel):
47 """Online store config for DynamoDB store"""
48
49 type: Literal["dynamodb"] = "dynamodb"
50 """Online store type selector"""
51
52 region: StrictStr
53 """ AWS Region Name """
54
55
56 class DynamoDBOnlineStore(OnlineStore):
57 """
58 Online feature store for AWS DynamoDB.
59 """
60
61 _dynamodb_client = None
62 _dynamodb_resource = None
63
64 @log_exceptions_and_usage(online_store="dynamodb")
65 def update(
66 self,
67 config: RepoConfig,
68 tables_to_delete: Sequence[FeatureView],
69 tables_to_keep: Sequence[FeatureView],
70 entities_to_delete: Sequence[Entity],
71 entities_to_keep: Sequence[Entity],
72 partial: bool,
73 ):
74 online_config = config.online_store
75 assert isinstance(online_config, DynamoDBOnlineStoreConfig)
76 dynamodb_client = self._get_dynamodb_client(online_config.region)
77 dynamodb_resource = self._get_dynamodb_resource(online_config.region)
78
79 for table_instance in tables_to_keep:
80 try:
81 dynamodb_resource.create_table(
82 TableName=_get_table_name(config, table_instance),
83 KeySchema=[{"AttributeName": "entity_id", "KeyType": "HASH"}],
84 AttributeDefinitions=[
85 {"AttributeName": "entity_id", "AttributeType": "S"}
86 ],
87 BillingMode="PAY_PER_REQUEST",
88 )
89 except ClientError as ce:
90 # If the table creation fails with ResourceInUseException,
91 # it means the table already exists or is being created.
92 # Otherwise, re-raise the exception
93 if ce.response["Error"]["Code"] != "ResourceInUseException":
94 raise
95
96 for table_instance in tables_to_keep:
97 dynamodb_client.get_waiter("table_exists").wait(
98 TableName=_get_table_name(config, table_instance)
99 )
100
101 for table_to_delete in tables_to_delete:
102 _delete_table_idempotent(
103 dynamodb_resource, _get_table_name(config, table_to_delete)
104 )
105
106 def teardown(
107 self,
108 config: RepoConfig,
109 tables: Sequence[FeatureView],
110 entities: Sequence[Entity],
111 ):
112 online_config = config.online_store
113 assert isinstance(online_config, DynamoDBOnlineStoreConfig)
114 dynamodb_resource = self._get_dynamodb_resource(online_config.region)
115
116 for table in tables:
117 _delete_table_idempotent(dynamodb_resource, _get_table_name(config, table))
118
119 @log_exceptions_and_usage(online_store="dynamodb")
120 def online_write_batch(
121 self,
122 config: RepoConfig,
123 table: FeatureView,
124 data: List[
125 Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]
126 ],
127 progress: Optional[Callable[[int], Any]],
128 ) -> None:
129 online_config = config.online_store
130 assert isinstance(online_config, DynamoDBOnlineStoreConfig)
131 dynamodb_resource = self._get_dynamodb_resource(online_config.region)
132
133 table_instance = dynamodb_resource.Table(_get_table_name(config, table))
134 with table_instance.batch_writer() as batch:
135 for entity_key, features, timestamp, created_ts in data:
136 entity_id = compute_entity_id(entity_key)
137 batch.put_item(
138 Item={
139 "entity_id": entity_id, # PartitionKey
140 "event_ts": str(utils.make_tzaware(timestamp)),
141 "values": {
142 k: v.SerializeToString()
143 for k, v in features.items() # Serialized Features
144 },
145 }
146 )
147 if progress:
148 progress(1)
149
150 @log_exceptions_and_usage(online_store="dynamodb")
151 def online_read(
152 self,
153 config: RepoConfig,
154 table: FeatureView,
155 entity_keys: List[EntityKeyProto],
156 requested_features: Optional[List[str]] = None,
157 ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:
158 online_config = config.online_store
159 assert isinstance(online_config, DynamoDBOnlineStoreConfig)
160 dynamodb_resource = self._get_dynamodb_resource(online_config.region)
161
162 result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = []
163 for entity_key in entity_keys:
164 table_instance = dynamodb_resource.Table(_get_table_name(config, table))
165 entity_id = compute_entity_id(entity_key)
166 with tracing_span(name="remote_call"):
167 response = table_instance.get_item(Key={"entity_id": entity_id})
168 value = response.get("Item")
169
170 if value is not None:
171 res = {}
172 for feature_name, value_bin in value["values"].items():
173 val = ValueProto()
174 val.ParseFromString(value_bin.value)
175 res[feature_name] = val
176 result.append((datetime.fromisoformat(value["event_ts"]), res))
177 else:
178 result.append((None, None))
179 return result
180
181 def _get_dynamodb_client(self, region: str):
182 if self._dynamodb_client is None:
183 self._dynamodb_client = _initialize_dynamodb_client(region)
184 return self._dynamodb_client
185
186 def _get_dynamodb_resource(self, region: str):
187 if self._dynamodb_resource is None:
188 self._dynamodb_resource = _initialize_dynamodb_resource(region)
189 return self._dynamodb_resource
190
191
192 def _initialize_dynamodb_client(region: str):
193 return boto3.client("dynamodb", region_name=region)
194
195
196 def _initialize_dynamodb_resource(region: str):
197 return boto3.resource("dynamodb", region_name=region)
198
199
200 def _get_table_name(config: RepoConfig, table: FeatureView) -> str:
201 return f"{config.project}.{table.name}"
202
203
204 def _delete_table_idempotent(
205 dynamodb_resource, table_name: str,
206 ):
207 try:
208 table = dynamodb_resource.Table(table_name)
209 table.delete()
210 logger.info(f"Dynamo table {table_name} was deleted")
211 except ClientError as ce:
212 # If the table deletion fails with ResourceNotFoundException,
213 # it means the table has already been deleted.
214 # Otherwise, re-raise the exception
215 if ce.response["Error"]["Code"] != "ResourceNotFoundException":
216 raise
217 else:
218 logger.warning(f"Trying to delete table that doesn't exist: {table_name}")
219
220
221 class DynamoDBTable(InfraObject):
222 """
223 A DynamoDB table managed by Feast.
224
225 Attributes:
226 name: The name of the table.
227 region: The region of the table.
228 """
229
230 region: str
231
232 def __init__(self, name: str, region: str):
233 super().__init__(name)
234 self.region = region
235
236 def to_infra_object_proto(self) -> InfraObjectProto:
237 dynamodb_table_proto = self.to_proto()
238 return InfraObjectProto(
239 infra_object_class_type=DYNAMODB_INFRA_OBJECT_CLASS_TYPE,
240 dynamodb_table=dynamodb_table_proto,
241 )
242
243 def to_proto(self) -> Any:
244 dynamodb_table_proto = DynamoDBTableProto()
245 dynamodb_table_proto.name = self.name
246 dynamodb_table_proto.region = self.region
247 return dynamodb_table_proto
248
249 @staticmethod
250 def from_infra_object_proto(infra_object_proto: InfraObjectProto) -> Any:
251 return DynamoDBTable(
252 name=infra_object_proto.dynamodb_table.name,
253 region=infra_object_proto.dynamodb_table.region,
254 )
255
256 @staticmethod
257 def from_proto(dynamodb_table_proto: DynamoDBTableProto) -> Any:
258 return DynamoDBTable(
259 name=dynamodb_table_proto.name, region=dynamodb_table_proto.region,
260 )
261
262 def update(self):
263 dynamodb_client = _initialize_dynamodb_client(region=self.region)
264 dynamodb_resource = _initialize_dynamodb_resource(region=self.region)
265
266 try:
267 dynamodb_resource.create_table(
268 TableName=f"{self.name}",
269 KeySchema=[{"AttributeName": "entity_id", "KeyType": "HASH"}],
270 AttributeDefinitions=[
271 {"AttributeName": "entity_id", "AttributeType": "S"}
272 ],
273 BillingMode="PAY_PER_REQUEST",
274 )
275 except ClientError as ce:
276 # If the table creation fails with ResourceInUseException,
277 # it means the table already exists or is being created.
278 # Otherwise, re-raise the exception
279 if ce.response["Error"]["Code"] != "ResourceInUseException":
280 raise
281
282 dynamodb_client.get_waiter("table_exists").wait(TableName=f"{self.name}")
283
284 def teardown(self):
285 dynamodb_resource = _initialize_dynamodb_resource(region=self.region)
286 _delete_table_idempotent(dynamodb_resource, self.name)
287
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sdk/python/feast/infra/online_stores/dynamodb.py b/sdk/python/feast/infra/online_stores/dynamodb.py
--- a/sdk/python/feast/infra/online_stores/dynamodb.py
+++ b/sdk/python/feast/infra/online_stores/dynamodb.py
@@ -56,6 +56,10 @@
class DynamoDBOnlineStore(OnlineStore):
"""
Online feature store for AWS DynamoDB.
+
+ Attributes:
+ _dynamodb_client: Boto3 DynamoDB client.
+ _dynamodb_resource: Boto3 DynamoDB resource.
"""
_dynamodb_client = None
@@ -71,6 +75,14 @@
entities_to_keep: Sequence[Entity],
partial: bool,
):
+ """
+ Update tables from the DynamoDB Online Store.
+
+ Args:
+ config: The RepoConfig for the current FeatureStore.
+ tables_to_delete: Tables to delete from the DynamoDB Online Store.
+ tables_to_keep: Tables to keep in the DynamoDB Online Store.
+ """
online_config = config.online_store
assert isinstance(online_config, DynamoDBOnlineStoreConfig)
dynamodb_client = self._get_dynamodb_client(online_config.region)
@@ -109,6 +121,13 @@
tables: Sequence[FeatureView],
entities: Sequence[Entity],
):
+ """
+ Delete tables from the DynamoDB Online Store.
+
+ Args:
+ config: The RepoConfig for the current FeatureStore.
+ tables: Tables to delete from the feature repo.
+ """
online_config = config.online_store
assert isinstance(online_config, DynamoDBOnlineStoreConfig)
dynamodb_resource = self._get_dynamodb_resource(online_config.region)
@@ -126,6 +145,21 @@
],
progress: Optional[Callable[[int], Any]],
) -> None:
+ """
+ Write a batch of feature rows to online DynamoDB store.
+
+ Note: This method applies a ``batch_writer`` to automatically handle any unprocessed items
+ and resend them as needed, this is useful if you're loading a lot of data at a time.
+
+ Args:
+ config: The RepoConfig for the current FeatureStore.
+ table: Feast FeatureView.
+ data: a list of quadruplets containing Feature data. Each quadruplet contains an Entity Key,
+ a dict containing feature values, an event timestamp for the row, and
+ the created timestamp for the row if it exists.
+ progress: Optional function to be called once every mini-batch of rows is written to
+ the online store. Can be used to display progress.
+ """
online_config = config.online_store
assert isinstance(online_config, DynamoDBOnlineStoreConfig)
dynamodb_resource = self._get_dynamodb_resource(online_config.region)
@@ -155,6 +189,17 @@
entity_keys: List[EntityKeyProto],
requested_features: Optional[List[str]] = None,
) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:
+ """
+ Retrieve feature values from the online DynamoDB store.
+
+ Note: This method is currently not optimized to retrieve a lot of data at a time
+ as it does sequential gets from the DynamoDB table.
+
+ Args:
+ config: The RepoConfig for the current FeatureStore.
+ table: Feast FeatureView.
+ entity_keys: a list of entity keys that should be read from the FeatureStore.
+ """
online_config = config.online_store
assert isinstance(online_config, DynamoDBOnlineStoreConfig)
dynamodb_resource = self._get_dynamodb_resource(online_config.region)
| {"golden_diff": "diff --git a/sdk/python/feast/infra/online_stores/dynamodb.py b/sdk/python/feast/infra/online_stores/dynamodb.py\n--- a/sdk/python/feast/infra/online_stores/dynamodb.py\n+++ b/sdk/python/feast/infra/online_stores/dynamodb.py\n@@ -56,6 +56,10 @@\n class DynamoDBOnlineStore(OnlineStore):\n \"\"\"\n Online feature store for AWS DynamoDB.\n+\n+ Attributes:\n+ _dynamodb_client: Boto3 DynamoDB client.\n+ _dynamodb_resource: Boto3 DynamoDB resource.\n \"\"\"\n \n _dynamodb_client = None\n@@ -71,6 +75,14 @@\n entities_to_keep: Sequence[Entity],\n partial: bool,\n ):\n+ \"\"\"\n+ Update tables from the DynamoDB Online Store.\n+\n+ Args:\n+ config: The RepoConfig for the current FeatureStore.\n+ tables_to_delete: Tables to delete from the DynamoDB Online Store.\n+ tables_to_keep: Tables to keep in the DynamoDB Online Store.\n+ \"\"\"\n online_config = config.online_store\n assert isinstance(online_config, DynamoDBOnlineStoreConfig)\n dynamodb_client = self._get_dynamodb_client(online_config.region)\n@@ -109,6 +121,13 @@\n tables: Sequence[FeatureView],\n entities: Sequence[Entity],\n ):\n+ \"\"\"\n+ Delete tables from the DynamoDB Online Store.\n+\n+ Args:\n+ config: The RepoConfig for the current FeatureStore.\n+ tables: Tables to delete from the feature repo.\n+ \"\"\"\n online_config = config.online_store\n assert isinstance(online_config, DynamoDBOnlineStoreConfig)\n dynamodb_resource = self._get_dynamodb_resource(online_config.region)\n@@ -126,6 +145,21 @@\n ],\n progress: Optional[Callable[[int], Any]],\n ) -> None:\n+ \"\"\"\n+ Write a batch of feature rows to online DynamoDB store.\n+\n+ Note: This method applies a ``batch_writer`` to automatically handle any unprocessed items\n+ and resend them as needed, this is useful if you're loading a lot of data at a time.\n+\n+ Args:\n+ config: The RepoConfig for the current FeatureStore.\n+ table: Feast FeatureView.\n+ data: a list of quadruplets containing Feature data. Each quadruplet contains an Entity Key,\n+ a dict containing feature values, an event timestamp for the row, and\n+ the created timestamp for the row if it exists.\n+ progress: Optional function to be called once every mini-batch of rows is written to\n+ the online store. Can be used to display progress.\n+ \"\"\"\n online_config = config.online_store\n assert isinstance(online_config, DynamoDBOnlineStoreConfig)\n dynamodb_resource = self._get_dynamodb_resource(online_config.region)\n@@ -155,6 +189,17 @@\n entity_keys: List[EntityKeyProto],\n requested_features: Optional[List[str]] = None,\n ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:\n+ \"\"\"\n+ Retrieve feature values from the online DynamoDB store.\n+\n+ Note: This method is currently not optimized to retrieve a lot of data at a time\n+ as it does sequential gets from the DynamoDB table.\n+\n+ Args:\n+ config: The RepoConfig for the current FeatureStore.\n+ table: Feast FeatureView.\n+ entity_keys: a list of entity keys that should be read from the FeatureStore.\n+ \"\"\"\n online_config = config.online_store\n assert isinstance(online_config, DynamoDBOnlineStoreConfig)\n dynamodb_resource = self._get_dynamodb_resource(online_config.region)\n", "issue": "[Docs] Explain DynamoDB online_write_batch uses a batch_writer\n## Expected Behavior \r\n\r\n`DynamoDBOnlineStore` method `online_write_batch` uses a [BatchWriter](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/dynamodb.html#batch-writing) under the hood. 
\r\n\r\nUser documentation should explain the advantages of using it, including \r\n\r\n> automatically handle buffering and sending items in batches. In addition, the batch writer will also automatically handle any unprocessed items and resend them as needed. \n", "before_files": [{"content": "# Copyright 2021 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport logging\nfrom datetime import datetime\nfrom typing import Any, Callable, Dict, List, Optional, Sequence, Tuple\n\nfrom pydantic import StrictStr\nfrom pydantic.typing import Literal\n\nfrom feast import Entity, FeatureView, utils\nfrom feast.infra.infra_object import DYNAMODB_INFRA_OBJECT_CLASS_TYPE, InfraObject\nfrom feast.infra.online_stores.helpers import compute_entity_id\nfrom feast.infra.online_stores.online_store import OnlineStore\nfrom feast.protos.feast.core.DynamoDBTable_pb2 import (\n DynamoDBTable as DynamoDBTableProto,\n)\nfrom feast.protos.feast.core.InfraObject_pb2 import InfraObject as InfraObjectProto\nfrom feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto\nfrom feast.protos.feast.types.Value_pb2 import Value as ValueProto\nfrom feast.repo_config import FeastConfigBaseModel, RepoConfig\nfrom feast.usage import log_exceptions_and_usage, tracing_span\n\ntry:\n import boto3\n from botocore.exceptions import ClientError\nexcept ImportError as e:\n from feast.errors import FeastExtrasDependencyImportError\n\n raise FeastExtrasDependencyImportError(\"aws\", str(e))\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass DynamoDBOnlineStoreConfig(FeastConfigBaseModel):\n \"\"\"Online store config for DynamoDB store\"\"\"\n\n type: Literal[\"dynamodb\"] = \"dynamodb\"\n \"\"\"Online store type selector\"\"\"\n\n region: StrictStr\n \"\"\" AWS Region Name \"\"\"\n\n\nclass DynamoDBOnlineStore(OnlineStore):\n \"\"\"\n Online feature store for AWS DynamoDB.\n \"\"\"\n\n _dynamodb_client = None\n _dynamodb_resource = None\n\n @log_exceptions_and_usage(online_store=\"dynamodb\")\n def update(\n self,\n config: RepoConfig,\n tables_to_delete: Sequence[FeatureView],\n tables_to_keep: Sequence[FeatureView],\n entities_to_delete: Sequence[Entity],\n entities_to_keep: Sequence[Entity],\n partial: bool,\n ):\n online_config = config.online_store\n assert isinstance(online_config, DynamoDBOnlineStoreConfig)\n dynamodb_client = self._get_dynamodb_client(online_config.region)\n dynamodb_resource = self._get_dynamodb_resource(online_config.region)\n\n for table_instance in tables_to_keep:\n try:\n dynamodb_resource.create_table(\n TableName=_get_table_name(config, table_instance),\n KeySchema=[{\"AttributeName\": \"entity_id\", \"KeyType\": \"HASH\"}],\n AttributeDefinitions=[\n {\"AttributeName\": \"entity_id\", \"AttributeType\": \"S\"}\n ],\n BillingMode=\"PAY_PER_REQUEST\",\n )\n except ClientError as ce:\n # If the table creation fails with ResourceInUseException,\n # it means the table already exists or is being created.\n # Otherwise, re-raise the exception\n if 
ce.response[\"Error\"][\"Code\"] != \"ResourceInUseException\":\n raise\n\n for table_instance in tables_to_keep:\n dynamodb_client.get_waiter(\"table_exists\").wait(\n TableName=_get_table_name(config, table_instance)\n )\n\n for table_to_delete in tables_to_delete:\n _delete_table_idempotent(\n dynamodb_resource, _get_table_name(config, table_to_delete)\n )\n\n def teardown(\n self,\n config: RepoConfig,\n tables: Sequence[FeatureView],\n entities: Sequence[Entity],\n ):\n online_config = config.online_store\n assert isinstance(online_config, DynamoDBOnlineStoreConfig)\n dynamodb_resource = self._get_dynamodb_resource(online_config.region)\n\n for table in tables:\n _delete_table_idempotent(dynamodb_resource, _get_table_name(config, table))\n\n @log_exceptions_and_usage(online_store=\"dynamodb\")\n def online_write_batch(\n self,\n config: RepoConfig,\n table: FeatureView,\n data: List[\n Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]\n ],\n progress: Optional[Callable[[int], Any]],\n ) -> None:\n online_config = config.online_store\n assert isinstance(online_config, DynamoDBOnlineStoreConfig)\n dynamodb_resource = self._get_dynamodb_resource(online_config.region)\n\n table_instance = dynamodb_resource.Table(_get_table_name(config, table))\n with table_instance.batch_writer() as batch:\n for entity_key, features, timestamp, created_ts in data:\n entity_id = compute_entity_id(entity_key)\n batch.put_item(\n Item={\n \"entity_id\": entity_id, # PartitionKey\n \"event_ts\": str(utils.make_tzaware(timestamp)),\n \"values\": {\n k: v.SerializeToString()\n for k, v in features.items() # Serialized Features\n },\n }\n )\n if progress:\n progress(1)\n\n @log_exceptions_and_usage(online_store=\"dynamodb\")\n def online_read(\n self,\n config: RepoConfig,\n table: FeatureView,\n entity_keys: List[EntityKeyProto],\n requested_features: Optional[List[str]] = None,\n ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:\n online_config = config.online_store\n assert isinstance(online_config, DynamoDBOnlineStoreConfig)\n dynamodb_resource = self._get_dynamodb_resource(online_config.region)\n\n result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = []\n for entity_key in entity_keys:\n table_instance = dynamodb_resource.Table(_get_table_name(config, table))\n entity_id = compute_entity_id(entity_key)\n with tracing_span(name=\"remote_call\"):\n response = table_instance.get_item(Key={\"entity_id\": entity_id})\n value = response.get(\"Item\")\n\n if value is not None:\n res = {}\n for feature_name, value_bin in value[\"values\"].items():\n val = ValueProto()\n val.ParseFromString(value_bin.value)\n res[feature_name] = val\n result.append((datetime.fromisoformat(value[\"event_ts\"]), res))\n else:\n result.append((None, None))\n return result\n\n def _get_dynamodb_client(self, region: str):\n if self._dynamodb_client is None:\n self._dynamodb_client = _initialize_dynamodb_client(region)\n return self._dynamodb_client\n\n def _get_dynamodb_resource(self, region: str):\n if self._dynamodb_resource is None:\n self._dynamodb_resource = _initialize_dynamodb_resource(region)\n return self._dynamodb_resource\n\n\ndef _initialize_dynamodb_client(region: str):\n return boto3.client(\"dynamodb\", region_name=region)\n\n\ndef _initialize_dynamodb_resource(region: str):\n return boto3.resource(\"dynamodb\", region_name=region)\n\n\ndef _get_table_name(config: RepoConfig, table: FeatureView) -> str:\n return f\"{config.project}.{table.name}\"\n\n\ndef 
_delete_table_idempotent(\n dynamodb_resource, table_name: str,\n):\n try:\n table = dynamodb_resource.Table(table_name)\n table.delete()\n logger.info(f\"Dynamo table {table_name} was deleted\")\n except ClientError as ce:\n # If the table deletion fails with ResourceNotFoundException,\n # it means the table has already been deleted.\n # Otherwise, re-raise the exception\n if ce.response[\"Error\"][\"Code\"] != \"ResourceNotFoundException\":\n raise\n else:\n logger.warning(f\"Trying to delete table that doesn't exist: {table_name}\")\n\n\nclass DynamoDBTable(InfraObject):\n \"\"\"\n A DynamoDB table managed by Feast.\n\n Attributes:\n name: The name of the table.\n region: The region of the table.\n \"\"\"\n\n region: str\n\n def __init__(self, name: str, region: str):\n super().__init__(name)\n self.region = region\n\n def to_infra_object_proto(self) -> InfraObjectProto:\n dynamodb_table_proto = self.to_proto()\n return InfraObjectProto(\n infra_object_class_type=DYNAMODB_INFRA_OBJECT_CLASS_TYPE,\n dynamodb_table=dynamodb_table_proto,\n )\n\n def to_proto(self) -> Any:\n dynamodb_table_proto = DynamoDBTableProto()\n dynamodb_table_proto.name = self.name\n dynamodb_table_proto.region = self.region\n return dynamodb_table_proto\n\n @staticmethod\n def from_infra_object_proto(infra_object_proto: InfraObjectProto) -> Any:\n return DynamoDBTable(\n name=infra_object_proto.dynamodb_table.name,\n region=infra_object_proto.dynamodb_table.region,\n )\n\n @staticmethod\n def from_proto(dynamodb_table_proto: DynamoDBTableProto) -> Any:\n return DynamoDBTable(\n name=dynamodb_table_proto.name, region=dynamodb_table_proto.region,\n )\n\n def update(self):\n dynamodb_client = _initialize_dynamodb_client(region=self.region)\n dynamodb_resource = _initialize_dynamodb_resource(region=self.region)\n\n try:\n dynamodb_resource.create_table(\n TableName=f\"{self.name}\",\n KeySchema=[{\"AttributeName\": \"entity_id\", \"KeyType\": \"HASH\"}],\n AttributeDefinitions=[\n {\"AttributeName\": \"entity_id\", \"AttributeType\": \"S\"}\n ],\n BillingMode=\"PAY_PER_REQUEST\",\n )\n except ClientError as ce:\n # If the table creation fails with ResourceInUseException,\n # it means the table already exists or is being created.\n # Otherwise, re-raise the exception\n if ce.response[\"Error\"][\"Code\"] != \"ResourceInUseException\":\n raise\n\n dynamodb_client.get_waiter(\"table_exists\").wait(TableName=f\"{self.name}\")\n\n def teardown(self):\n dynamodb_resource = _initialize_dynamodb_resource(region=self.region)\n _delete_table_idempotent(dynamodb_resource, self.name)\n", "path": "sdk/python/feast/infra/online_stores/dynamodb.py"}], "after_files": [{"content": "# Copyright 2021 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport logging\nfrom datetime import datetime\nfrom typing import Any, Callable, Dict, List, Optional, Sequence, Tuple\n\nfrom pydantic import StrictStr\nfrom pydantic.typing import Literal\n\nfrom feast import Entity, FeatureView, utils\nfrom feast.infra.infra_object 
import DYNAMODB_INFRA_OBJECT_CLASS_TYPE, InfraObject\nfrom feast.infra.online_stores.helpers import compute_entity_id\nfrom feast.infra.online_stores.online_store import OnlineStore\nfrom feast.protos.feast.core.DynamoDBTable_pb2 import (\n DynamoDBTable as DynamoDBTableProto,\n)\nfrom feast.protos.feast.core.InfraObject_pb2 import InfraObject as InfraObjectProto\nfrom feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto\nfrom feast.protos.feast.types.Value_pb2 import Value as ValueProto\nfrom feast.repo_config import FeastConfigBaseModel, RepoConfig\nfrom feast.usage import log_exceptions_and_usage, tracing_span\n\ntry:\n import boto3\n from botocore.exceptions import ClientError\nexcept ImportError as e:\n from feast.errors import FeastExtrasDependencyImportError\n\n raise FeastExtrasDependencyImportError(\"aws\", str(e))\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass DynamoDBOnlineStoreConfig(FeastConfigBaseModel):\n \"\"\"Online store config for DynamoDB store\"\"\"\n\n type: Literal[\"dynamodb\"] = \"dynamodb\"\n \"\"\"Online store type selector\"\"\"\n\n region: StrictStr\n \"\"\" AWS Region Name \"\"\"\n\n\nclass DynamoDBOnlineStore(OnlineStore):\n \"\"\"\n Online feature store for AWS DynamoDB.\n\n Attributes:\n _dynamodb_client: Boto3 DynamoDB client.\n _dynamodb_resource: Boto3 DynamoDB resource.\n \"\"\"\n\n _dynamodb_client = None\n _dynamodb_resource = None\n\n @log_exceptions_and_usage(online_store=\"dynamodb\")\n def update(\n self,\n config: RepoConfig,\n tables_to_delete: Sequence[FeatureView],\n tables_to_keep: Sequence[FeatureView],\n entities_to_delete: Sequence[Entity],\n entities_to_keep: Sequence[Entity],\n partial: bool,\n ):\n \"\"\"\n Update tables from the DynamoDB Online Store.\n\n Args:\n config: The RepoConfig for the current FeatureStore.\n tables_to_delete: Tables to delete from the DynamoDB Online Store.\n tables_to_keep: Tables to keep in the DynamoDB Online Store.\n \"\"\"\n online_config = config.online_store\n assert isinstance(online_config, DynamoDBOnlineStoreConfig)\n dynamodb_client = self._get_dynamodb_client(online_config.region)\n dynamodb_resource = self._get_dynamodb_resource(online_config.region)\n\n for table_instance in tables_to_keep:\n try:\n dynamodb_resource.create_table(\n TableName=_get_table_name(config, table_instance),\n KeySchema=[{\"AttributeName\": \"entity_id\", \"KeyType\": \"HASH\"}],\n AttributeDefinitions=[\n {\"AttributeName\": \"entity_id\", \"AttributeType\": \"S\"}\n ],\n BillingMode=\"PAY_PER_REQUEST\",\n )\n except ClientError as ce:\n # If the table creation fails with ResourceInUseException,\n # it means the table already exists or is being created.\n # Otherwise, re-raise the exception\n if ce.response[\"Error\"][\"Code\"] != \"ResourceInUseException\":\n raise\n\n for table_instance in tables_to_keep:\n dynamodb_client.get_waiter(\"table_exists\").wait(\n TableName=_get_table_name(config, table_instance)\n )\n\n for table_to_delete in tables_to_delete:\n _delete_table_idempotent(\n dynamodb_resource, _get_table_name(config, table_to_delete)\n )\n\n def teardown(\n self,\n config: RepoConfig,\n tables: Sequence[FeatureView],\n entities: Sequence[Entity],\n ):\n \"\"\"\n Delete tables from the DynamoDB Online Store.\n\n Args:\n config: The RepoConfig for the current FeatureStore.\n tables: Tables to delete from the feature repo.\n \"\"\"\n online_config = config.online_store\n assert isinstance(online_config, DynamoDBOnlineStoreConfig)\n dynamodb_resource = 
self._get_dynamodb_resource(online_config.region)\n\n for table in tables:\n _delete_table_idempotent(dynamodb_resource, _get_table_name(config, table))\n\n @log_exceptions_and_usage(online_store=\"dynamodb\")\n def online_write_batch(\n self,\n config: RepoConfig,\n table: FeatureView,\n data: List[\n Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]\n ],\n progress: Optional[Callable[[int], Any]],\n ) -> None:\n \"\"\"\n Write a batch of feature rows to online DynamoDB store.\n\n Note: This method applies a ``batch_writer`` to automatically handle any unprocessed items\n and resend them as needed, this is useful if you're loading a lot of data at a time.\n\n Args:\n config: The RepoConfig for the current FeatureStore.\n table: Feast FeatureView.\n data: a list of quadruplets containing Feature data. Each quadruplet contains an Entity Key,\n a dict containing feature values, an event timestamp for the row, and\n the created timestamp for the row if it exists.\n progress: Optional function to be called once every mini-batch of rows is written to\n the online store. Can be used to display progress.\n \"\"\"\n online_config = config.online_store\n assert isinstance(online_config, DynamoDBOnlineStoreConfig)\n dynamodb_resource = self._get_dynamodb_resource(online_config.region)\n\n table_instance = dynamodb_resource.Table(_get_table_name(config, table))\n with table_instance.batch_writer() as batch:\n for entity_key, features, timestamp, created_ts in data:\n entity_id = compute_entity_id(entity_key)\n batch.put_item(\n Item={\n \"entity_id\": entity_id, # PartitionKey\n \"event_ts\": str(utils.make_tzaware(timestamp)),\n \"values\": {\n k: v.SerializeToString()\n for k, v in features.items() # Serialized Features\n },\n }\n )\n if progress:\n progress(1)\n\n @log_exceptions_and_usage(online_store=\"dynamodb\")\n def online_read(\n self,\n config: RepoConfig,\n table: FeatureView,\n entity_keys: List[EntityKeyProto],\n requested_features: Optional[List[str]] = None,\n ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:\n \"\"\"\n Retrieve feature values from the online DynamoDB store.\n\n Note: This method is currently not optimized to retrieve a lot of data at a time\n as it does sequential gets from the DynamoDB table.\n\n Args:\n config: The RepoConfig for the current FeatureStore.\n table: Feast FeatureView.\n entity_keys: a list of entity keys that should be read from the FeatureStore.\n \"\"\"\n online_config = config.online_store\n assert isinstance(online_config, DynamoDBOnlineStoreConfig)\n dynamodb_resource = self._get_dynamodb_resource(online_config.region)\n\n result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = []\n for entity_key in entity_keys:\n table_instance = dynamodb_resource.Table(_get_table_name(config, table))\n entity_id = compute_entity_id(entity_key)\n with tracing_span(name=\"remote_call\"):\n response = table_instance.get_item(Key={\"entity_id\": entity_id})\n value = response.get(\"Item\")\n\n if value is not None:\n res = {}\n for feature_name, value_bin in value[\"values\"].items():\n val = ValueProto()\n val.ParseFromString(value_bin.value)\n res[feature_name] = val\n result.append((datetime.fromisoformat(value[\"event_ts\"]), res))\n else:\n result.append((None, None))\n return result\n\n def _get_dynamodb_client(self, region: str):\n if self._dynamodb_client is None:\n self._dynamodb_client = _initialize_dynamodb_client(region)\n return self._dynamodb_client\n\n def _get_dynamodb_resource(self, 
region: str):\n if self._dynamodb_resource is None:\n self._dynamodb_resource = _initialize_dynamodb_resource(region)\n return self._dynamodb_resource\n\n\ndef _initialize_dynamodb_client(region: str):\n return boto3.client(\"dynamodb\", region_name=region)\n\n\ndef _initialize_dynamodb_resource(region: str):\n return boto3.resource(\"dynamodb\", region_name=region)\n\n\ndef _get_table_name(config: RepoConfig, table: FeatureView) -> str:\n return f\"{config.project}.{table.name}\"\n\n\ndef _delete_table_idempotent(\n dynamodb_resource, table_name: str,\n):\n try:\n table = dynamodb_resource.Table(table_name)\n table.delete()\n logger.info(f\"Dynamo table {table_name} was deleted\")\n except ClientError as ce:\n # If the table deletion fails with ResourceNotFoundException,\n # it means the table has already been deleted.\n # Otherwise, re-raise the exception\n if ce.response[\"Error\"][\"Code\"] != \"ResourceNotFoundException\":\n raise\n else:\n logger.warning(f\"Trying to delete table that doesn't exist: {table_name}\")\n\n\nclass DynamoDBTable(InfraObject):\n \"\"\"\n A DynamoDB table managed by Feast.\n\n Attributes:\n name: The name of the table.\n region: The region of the table.\n \"\"\"\n\n region: str\n\n def __init__(self, name: str, region: str):\n super().__init__(name)\n self.region = region\n\n def to_infra_object_proto(self) -> InfraObjectProto:\n dynamodb_table_proto = self.to_proto()\n return InfraObjectProto(\n infra_object_class_type=DYNAMODB_INFRA_OBJECT_CLASS_TYPE,\n dynamodb_table=dynamodb_table_proto,\n )\n\n def to_proto(self) -> Any:\n dynamodb_table_proto = DynamoDBTableProto()\n dynamodb_table_proto.name = self.name\n dynamodb_table_proto.region = self.region\n return dynamodb_table_proto\n\n @staticmethod\n def from_infra_object_proto(infra_object_proto: InfraObjectProto) -> Any:\n return DynamoDBTable(\n name=infra_object_proto.dynamodb_table.name,\n region=infra_object_proto.dynamodb_table.region,\n )\n\n @staticmethod\n def from_proto(dynamodb_table_proto: DynamoDBTableProto) -> Any:\n return DynamoDBTable(\n name=dynamodb_table_proto.name, region=dynamodb_table_proto.region,\n )\n\n def update(self):\n dynamodb_client = _initialize_dynamodb_client(region=self.region)\n dynamodb_resource = _initialize_dynamodb_resource(region=self.region)\n\n try:\n dynamodb_resource.create_table(\n TableName=f\"{self.name}\",\n KeySchema=[{\"AttributeName\": \"entity_id\", \"KeyType\": \"HASH\"}],\n AttributeDefinitions=[\n {\"AttributeName\": \"entity_id\", \"AttributeType\": \"S\"}\n ],\n BillingMode=\"PAY_PER_REQUEST\",\n )\n except ClientError as ce:\n # If the table creation fails with ResourceInUseException,\n # it means the table already exists or is being created.\n # Otherwise, re-raise the exception\n if ce.response[\"Error\"][\"Code\"] != \"ResourceInUseException\":\n raise\n\n dynamodb_client.get_waiter(\"table_exists\").wait(TableName=f\"{self.name}\")\n\n def teardown(self):\n dynamodb_resource = _initialize_dynamodb_resource(region=self.region)\n _delete_table_idempotent(dynamodb_resource, self.name)\n", "path": "sdk/python/feast/infra/online_stores/dynamodb.py"}]} | 3,384 | 829 |
gh_patches_debug_63214 | rasdani/github-patches | git_diff | ManimCommunity__manim-3108 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The documentation for the `--resolution` flag in the cli is wrong
The current documentation of the `--resolution` flag says the format is `(W,H)`, which is confusing because the passed value needs to be of the form `"W,H"`, so the documentation should be updated accordingly to reflect the usage `-r "W,H"`, ideally with an example such as `-r "1920,1080"`.
--- END ISSUE ---
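For reference, the value passed to `-r` is a single string such as `"1920,1080"`, which the CLI callback splits into a width/height pair. A minimal standalone sketch mirroring the `validate_resolution` callback shown below:

```python
import re

def parse_resolution(value: str) -> tuple[int, int]:
    # "1920,1080" -> (1920, 1080); ",", ";" and "-" are all accepted separators
    width, height = map(int, re.split(r"[;,\-]", value))
    return (width, height)

print(parse_resolution("1920,1080"))  # (1920, 1080)
```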
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `manim/cli/render/render_options.py`
Content:
```
1 from __future__ import annotations
2
3 import re
4
5 import click
6 from cloup import option, option_group
7
8 from manim.constants import QUALITIES, RendererType
9
10 from ... import logger
11
12
13 def validate_scene_range(ctx, param, value):
14 try:
15 start = int(value)
16 return (start,)
17 except Exception:
18 pass
19
20 if value:
21 try:
22 start, end = map(int, re.split(r"[;,\-]", value))
23 return start, end
24 except Exception:
25 logger.error("Couldn't determine a range for -n option.")
26 exit()
27
28
29 def validate_resolution(ctx, param, value):
30 if value:
31 try:
32 start, end = map(int, re.split(r"[;,\-]", value))
33 return (start, end)
34 except Exception:
35 logger.error("Resolution option is invalid.")
36 exit()
37
38
39 render_options = option_group(
40 "Render Options",
41 option(
42 "-n",
43 "--from_animation_number",
44 callback=validate_scene_range,
45 help="Start rendering from n_0 until n_1. If n_1 is left unspecified, "
46 "renders all scenes after n_0.",
47 default=None,
48 ),
49 option(
50 "-a",
51 "--write_all",
52 is_flag=True,
53 help="Render all scenes in the input file.",
54 default=None,
55 ),
56 option(
57 "--format",
58 type=click.Choice(["png", "gif", "mp4", "webm", "mov"], case_sensitive=False),
59 default=None,
60 ),
61 option("-s", "--save_last_frame", is_flag=True, default=None),
62 option(
63 "-q",
64 "--quality",
65 default=None,
66 type=click.Choice(
67 list(reversed([q["flag"] for q in QUALITIES.values() if q["flag"]])), # type: ignore
68 case_sensitive=False,
69 ),
70 help="Render quality at the follow resolution framerates, respectively: "
71 + ", ".join(
72 reversed(
73 [
74 f'{q["pixel_width"]}x{q["pixel_height"]} {q["frame_rate"]}FPS'
75 for q in QUALITIES.values()
76 if q["flag"]
77 ]
78 )
79 ),
80 ),
81 option(
82 "-r",
83 "--resolution",
84 callback=validate_resolution,
85 default=None,
86 help="Resolution in (W,H) for when 16:9 aspect ratio isn't possible.",
87 ),
88 option(
89 "--fps",
90 "--frame_rate",
91 "frame_rate",
92 type=float,
93 default=None,
94 help="Render at this frame rate.",
95 ),
96 option(
97 "--renderer",
98 type=click.Choice(
99 [renderer_type.value for renderer_type in RendererType],
100 case_sensitive=False,
101 ),
102 help="Select a renderer for your Scene.",
103 default="cairo",
104 ),
105 option(
106 "-g",
107 "--save_pngs",
108 is_flag=True,
109 default=None,
110 help="Save each frame as png (Deprecated).",
111 ),
112 option(
113 "-i",
114 "--save_as_gif",
115 default=None,
116 is_flag=True,
117 help="Save as a gif (Deprecated).",
118 ),
119 option(
120 "--save_sections",
121 default=None,
122 is_flag=True,
123 help="Save section videos in addition to movie file.",
124 ),
125 option(
126 "-s",
127 "--save_last_frame",
128 default=None,
129 is_flag=True,
130 help="Save last frame as png (Deprecated).",
131 ),
132 option(
133 "-t",
134 "--transparent",
135 is_flag=True,
136 help="Render scenes with alpha channel.",
137 ),
138 option(
139 "--use_projection_fill_shaders",
140 is_flag=True,
141 help="Use shaders for OpenGLVMobject fill which are compatible with transformation matrices.",
142 default=None,
143 ),
144 option(
145 "--use_projection_stroke_shaders",
146 is_flag=True,
147 help="Use shaders for OpenGLVMobject stroke which are compatible with transformation matrices.",
148 default=None,
149 ),
150 )
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/manim/cli/render/render_options.py b/manim/cli/render/render_options.py
--- a/manim/cli/render/render_options.py
+++ b/manim/cli/render/render_options.py
@@ -83,7 +83,7 @@
"--resolution",
callback=validate_resolution,
default=None,
- help="Resolution in (W,H) for when 16:9 aspect ratio isn't possible.",
+ help='Resolution in "W,H" for when 16:9 aspect ratio isn\'t possible.',
),
option(
"--fps",
| {"golden_diff": "diff --git a/manim/cli/render/render_options.py b/manim/cli/render/render_options.py\n--- a/manim/cli/render/render_options.py\n+++ b/manim/cli/render/render_options.py\n@@ -83,7 +83,7 @@\n \"--resolution\",\n callback=validate_resolution,\n default=None,\n- help=\"Resolution in (W,H) for when 16:9 aspect ratio isn't possible.\",\n+ help='Resolution in \"W,H\" for when 16:9 aspect ratio isn\\'t possible.',\n ),\n option(\n \"--fps\",\n", "issue": "The documentation for the `--resolution` flag in the cli is wrong\nThe current documentation of the `--resolution` flag says the format is `(W,H)` which is confusing because the passed value needs to be of the form `\"W,H\"`so the documentation should be updatet accordingly such that it reflects the usage `-r \"W,H\"` best with an example of `-r \"1920,1080\"`\n", "before_files": [{"content": "from __future__ import annotations\n\nimport re\n\nimport click\nfrom cloup import option, option_group\n\nfrom manim.constants import QUALITIES, RendererType\n\nfrom ... import logger\n\n\ndef validate_scene_range(ctx, param, value):\n try:\n start = int(value)\n return (start,)\n except Exception:\n pass\n\n if value:\n try:\n start, end = map(int, re.split(r\"[;,\\-]\", value))\n return start, end\n except Exception:\n logger.error(\"Couldn't determine a range for -n option.\")\n exit()\n\n\ndef validate_resolution(ctx, param, value):\n if value:\n try:\n start, end = map(int, re.split(r\"[;,\\-]\", value))\n return (start, end)\n except Exception:\n logger.error(\"Resolution option is invalid.\")\n exit()\n\n\nrender_options = option_group(\n \"Render Options\",\n option(\n \"-n\",\n \"--from_animation_number\",\n callback=validate_scene_range,\n help=\"Start rendering from n_0 until n_1. If n_1 is left unspecified, \"\n \"renders all scenes after n_0.\",\n default=None,\n ),\n option(\n \"-a\",\n \"--write_all\",\n is_flag=True,\n help=\"Render all scenes in the input file.\",\n default=None,\n ),\n option(\n \"--format\",\n type=click.Choice([\"png\", \"gif\", \"mp4\", \"webm\", \"mov\"], case_sensitive=False),\n default=None,\n ),\n option(\"-s\", \"--save_last_frame\", is_flag=True, default=None),\n option(\n \"-q\",\n \"--quality\",\n default=None,\n type=click.Choice(\n list(reversed([q[\"flag\"] for q in QUALITIES.values() if q[\"flag\"]])), # type: ignore\n case_sensitive=False,\n ),\n help=\"Render quality at the follow resolution framerates, respectively: \"\n + \", \".join(\n reversed(\n [\n f'{q[\"pixel_width\"]}x{q[\"pixel_height\"]} {q[\"frame_rate\"]}FPS'\n for q in QUALITIES.values()\n if q[\"flag\"]\n ]\n )\n ),\n ),\n option(\n \"-r\",\n \"--resolution\",\n callback=validate_resolution,\n default=None,\n help=\"Resolution in (W,H) for when 16:9 aspect ratio isn't possible.\",\n ),\n option(\n \"--fps\",\n \"--frame_rate\",\n \"frame_rate\",\n type=float,\n default=None,\n help=\"Render at this frame rate.\",\n ),\n option(\n \"--renderer\",\n type=click.Choice(\n [renderer_type.value for renderer_type in RendererType],\n case_sensitive=False,\n ),\n help=\"Select a renderer for your Scene.\",\n default=\"cairo\",\n ),\n option(\n \"-g\",\n \"--save_pngs\",\n is_flag=True,\n default=None,\n help=\"Save each frame as png (Deprecated).\",\n ),\n option(\n \"-i\",\n \"--save_as_gif\",\n default=None,\n is_flag=True,\n help=\"Save as a gif (Deprecated).\",\n ),\n option(\n \"--save_sections\",\n default=None,\n is_flag=True,\n help=\"Save section videos in addition to movie file.\",\n ),\n option(\n \"-s\",\n 
\"--save_last_frame\",\n default=None,\n is_flag=True,\n help=\"Save last frame as png (Deprecated).\",\n ),\n option(\n \"-t\",\n \"--transparent\",\n is_flag=True,\n help=\"Render scenes with alpha channel.\",\n ),\n option(\n \"--use_projection_fill_shaders\",\n is_flag=True,\n help=\"Use shaders for OpenGLVMobject fill which are compatible with transformation matrices.\",\n default=None,\n ),\n option(\n \"--use_projection_stroke_shaders\",\n is_flag=True,\n help=\"Use shaders for OpenGLVMobject stroke which are compatible with transformation matrices.\",\n default=None,\n ),\n)\n", "path": "manim/cli/render/render_options.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport re\n\nimport click\nfrom cloup import option, option_group\n\nfrom manim.constants import QUALITIES, RendererType\n\nfrom ... import logger\n\n\ndef validate_scene_range(ctx, param, value):\n try:\n start = int(value)\n return (start,)\n except Exception:\n pass\n\n if value:\n try:\n start, end = map(int, re.split(r\"[;,\\-]\", value))\n return start, end\n except Exception:\n logger.error(\"Couldn't determine a range for -n option.\")\n exit()\n\n\ndef validate_resolution(ctx, param, value):\n if value:\n try:\n start, end = map(int, re.split(r\"[;,\\-]\", value))\n return (start, end)\n except Exception:\n logger.error(\"Resolution option is invalid.\")\n exit()\n\n\nrender_options = option_group(\n \"Render Options\",\n option(\n \"-n\",\n \"--from_animation_number\",\n callback=validate_scene_range,\n help=\"Start rendering from n_0 until n_1. If n_1 is left unspecified, \"\n \"renders all scenes after n_0.\",\n default=None,\n ),\n option(\n \"-a\",\n \"--write_all\",\n is_flag=True,\n help=\"Render all scenes in the input file.\",\n default=None,\n ),\n option(\n \"--format\",\n type=click.Choice([\"png\", \"gif\", \"mp4\", \"webm\", \"mov\"], case_sensitive=False),\n default=None,\n ),\n option(\"-s\", \"--save_last_frame\", is_flag=True, default=None),\n option(\n \"-q\",\n \"--quality\",\n default=None,\n type=click.Choice(\n list(reversed([q[\"flag\"] for q in QUALITIES.values() if q[\"flag\"]])), # type: ignore\n case_sensitive=False,\n ),\n help=\"Render quality at the follow resolution framerates, respectively: \"\n + \", \".join(\n reversed(\n [\n f'{q[\"pixel_width\"]}x{q[\"pixel_height\"]} {q[\"frame_rate\"]}FPS'\n for q in QUALITIES.values()\n if q[\"flag\"]\n ]\n )\n ),\n ),\n option(\n \"-r\",\n \"--resolution\",\n callback=validate_resolution,\n default=None,\n help='Resolution in \"W,H\" for when 16:9 aspect ratio isn\\'t possible.',\n ),\n option(\n \"--fps\",\n \"--frame_rate\",\n \"frame_rate\",\n type=float,\n default=None,\n help=\"Render at this frame rate.\",\n ),\n option(\n \"--renderer\",\n type=click.Choice(\n [renderer_type.value for renderer_type in RendererType],\n case_sensitive=False,\n ),\n help=\"Select a renderer for your Scene.\",\n default=\"cairo\",\n ),\n option(\n \"-g\",\n \"--save_pngs\",\n is_flag=True,\n default=None,\n help=\"Save each frame as png (Deprecated).\",\n ),\n option(\n \"-i\",\n \"--save_as_gif\",\n default=None,\n is_flag=True,\n help=\"Save as a gif (Deprecated).\",\n ),\n option(\n \"--save_sections\",\n default=None,\n is_flag=True,\n help=\"Save section videos in addition to movie file.\",\n ),\n option(\n \"-s\",\n \"--save_last_frame\",\n default=None,\n is_flag=True,\n help=\"Save last frame as png (Deprecated).\",\n ),\n option(\n \"-t\",\n \"--transparent\",\n is_flag=True,\n help=\"Render scenes with alpha 
channel.\",\n ),\n option(\n \"--use_projection_fill_shaders\",\n is_flag=True,\n help=\"Use shaders for OpenGLVMobject fill which are compatible with transformation matrices.\",\n default=None,\n ),\n option(\n \"--use_projection_stroke_shaders\",\n is_flag=True,\n help=\"Use shaders for OpenGLVMobject stroke which are compatible with transformation matrices.\",\n default=None,\n ),\n)\n", "path": "manim/cli/render/render_options.py"}]} | 1,551 | 122 |
gh_patches_debug_15376 | rasdani/github-patches | git_diff | beeware__toga-1850 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Hide webview black background when resizing
### Describe the bug
By default on Windows, a WebView renders a black background behind it. When the WebView is resized, this black background becomes visible for a couple of frames while the HTML rendered on top is still updating, so black "bars" appear at the sides of the WebView during the resize.
This can look quite ugly.
### Steps to reproduce
see: https://youtu.be/Xj1rW4L-Pqk
```py
"""
My first application
"""
import toga
from toga.style import Pack
from toga.style.pack import COLUMN, ROW
class HelloWorld(toga.App):
def startup(self):
"""
Construct and show the Toga application.
Usually, you would add your application to a main content box.
We then create a main window (with a name matching the app), and
show the main window.
"""
web = toga.WebView(url='https://google.com')
self.main_window = toga.MainWindow(title=self.formal_name)
self.main_window.content = web
self.main_window.show()
def main():
helloWorld = HelloWorld("foobar",
"org.foo.bar")
return helloWorld
main().main_loop()
```
### Environment
- Operating System:
  - Windows 11, winforms
- Python version:
  - 3.10
- Software versions:
  - Briefcase:
    - 0.3.12
  - Toga:
    - 0.3.0
--- END ISSUE ---
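For background, the WinForms WebView2 control exposes a `DefaultBackgroundColor` property that sets the color painted wherever the page has not yet drawn, which is typically what shows through for a few frames during a resize. A hedged sketch of adjusting it from Python via pythonnet is shown below; this is illustrative only and not necessarily the patch the repository adopted.

```python
import clr

clr.AddReference("System.Drawing")
from System.Drawing import Color  # .NET color type used by WinForms controls


def soften_resize_flash(webview2_control):
    # Paint the control's not-yet-rendered area white so the frames shown
    # while resizing match a typical page background instead of black.
    webview2_control.DefaultBackgroundColor = Color.White
```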
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `winforms/src/toga_winforms/widgets/webview.py`
Content:
```
1 import traceback
2 import webbrowser
3 from asyncio import get_event_loop
4
5 from travertino.size import at_least
6
7 import toga
8 from toga_winforms.keys import toga_key
9 from toga_winforms.libs import (
10 Action,
11 CoreWebView2CreationProperties,
12 String,
13 Task,
14 TaskScheduler,
15 Uri,
16 WebView2,
17 WebView2RuntimeNotFoundException,
18 WinForms,
19 )
20
21 from .base import Widget
22
23
24 class TogaWebBrowser(WebView2):
25 def __init__(self, interface):
26 super().__init__()
27 self.interface = interface
28 self._edge_runtime_available = None # Set to an unknown state initially
29
30
31 class WebView(Widget):
32 def create(self):
33 self.native = TogaWebBrowser(self.interface)
34 self.native.CoreWebView2InitializationCompleted += (
35 self.winforms_initialization_completed
36 )
37 self.native.NavigationCompleted += self.winforms_navigation_completed
38 self.native.KeyDown += self.winforms_key_down
39
40 props = CoreWebView2CreationProperties()
41 props.UserDataFolder = str(toga.App.app.paths.cache / "WebView2")
42 self.native.CreationProperties = props
43
44 # Trigger the configuration of the webview
45 self.native.EnsureCoreWebView2Async(None)
46
47 def winforms_initialization_completed(self, sender, args):
48 # The WebView2 widget has an "internal" widget (CoreWebView2) that is
49 # the actual web view. The view isn't ready until the internal widget has
50 # completed initialization, and that isn't done until an explicit
51 # request is made (EnsureCoreWebView2Async).
52 if args.IsSuccess:
53 # We've initialized, so we must have the runtime
54 self.native._edge_runtime_available = True
55 try:
56 settings = self.native.CoreWebView2.Settings
57
58 debug = True
59 settings.AreDefaultContextMenusEnabled = debug
60 settings.AreDefaultScriptDialogsEnabled = True
61 settings.AreDevToolsEnabled = debug
62 settings.IsBuiltInErrorPageEnabled = True
63 settings.IsScriptEnabled = True
64 settings.IsWebMessageEnabled = True
65 settings.IsStatusBarEnabled = debug
66 settings.IsZoomControlEnabled = True
67
68 self.set_user_agent(self.interface.user_agent)
69
70 if self.interface._html_content:
71 self.set_content(self.interface.url, self.interface._html_content)
72 else:
73 self.set_url(self.interface.url)
74
75 except Exception:
76 traceback.print_exc()
77 else:
78 if isinstance(
79 args.InitializationException, WebView2RuntimeNotFoundException
80 ):
81 print("Could not find the Microsoft Edge WebView2 Runtime.")
82 if self.native._edge_runtime_available is None:
83 # The initialize message is sent twice on failure.
84 # We only want to show the dialog once, so track that we
85 # know the runtime is missing.
86 self.native._edge_runtime_available = False
87 WinForms.MessageBox.Show(
88 "The Microsoft Edge WebView2 Runtime is not installed. "
89 "Web content will not be displayed.\n\n"
90 "Click OK to download the WebView2 Evergreen Runtime "
91 "Bootstrapper from Microsoft.",
92 "Missing Edge Webview2 runtime",
93 WinForms.MessageBoxButtons.OK,
94 WinForms.MessageBoxIcon.Error,
95 )
96 webbrowser.open(
97 "https://developer.microsoft.com/en-us/microsoft-edge/webview2/#download-section"
98 )
99 else:
100 print(args.InitializationException)
101
102 def winforms_navigation_completed(self, sender, args):
103 if self.interface.on_webview_load:
104 self.interface.on_webview_load(self.interface)
105
106 def winforms_key_down(self, sender, args):
107 if self.interface.on_key_down:
108 self.interface.on_key_down(self.interface, **toga_key(args))
109
110 def set_on_key_down(self, handler):
111 pass
112
113 def set_on_webview_load(self, handler):
114 pass
115
116 def get_url(self):
117 return str(self.native.Source)
118
119 def set_url(self, value):
120 if value:
121 self.native.Source = Uri(value)
122
123 def set_content(self, root_url, content):
124 if content and self.native.CoreWebView2:
125 self.native.CoreWebView2.NavigateToString(content)
126
127 def get_dom(self):
128 self.interface.factory.not_implemented("WebView.get_dom()")
129
130 def set_user_agent(self, value):
131 user_agent = (
132 value
133 if value
134 else (
135 "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
136 "(KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36 Edg/90.0.818.46"
137 )
138 )
139 if self.native.CoreWebView2:
140 self.native.CoreWebView2.Settings.UserAgent = user_agent
141
142 async def evaluate_javascript(self, javascript):
143 loop = get_event_loop()
144 future = loop.create_future()
145
146 task_scheduler = TaskScheduler.FromCurrentSynchronizationContext()
147 try:
148
149 def callback(task):
150 future.set_result(task.Result)
151
152 self.native.ExecuteScriptAsync(javascript).ContinueWith(
153 Action[Task[String]](callback), task_scheduler
154 )
155 except Exception:
156 traceback.print_exc()
157 future.set_result(None)
158
159 return await future
160
161 def invoke_javascript(self, javascript):
162 # The script will execute async, but you weren't going to get the result
163 # anyway, so it doesn't really matter.
164 self.native.ExecuteScriptAsync(javascript)
165
166 def rehint(self):
167 self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)
168 self.interface.intrinsic.height = at_least(self.interface.MIN_HEIGHT)
169
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/winforms/src/toga_winforms/widgets/webview.py b/winforms/src/toga_winforms/widgets/webview.py
--- a/winforms/src/toga_winforms/widgets/webview.py
+++ b/winforms/src/toga_winforms/widgets/webview.py
@@ -8,6 +8,7 @@
from toga_winforms.keys import toga_key
from toga_winforms.libs import (
Action,
+ Color,
CoreWebView2CreationProperties,
String,
Task,
@@ -43,6 +44,7 @@
# Trigger the configuration of the webview
self.native.EnsureCoreWebView2Async(None)
+ self.native.DefaultBackgroundColor = Color.Transparent
def winforms_initialization_completed(self, sender, args):
# The WebView2 widget has an "internal" widget (CoreWebView2) that is
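The hunk above is the whole fix: `Color` joins the existing `toga_winforms.libs` import, and the control's `DefaultBackgroundColor` is switched from the opaque black default to transparent, so nothing dark can show through while the page re-renders during a resize. Below is a minimal sketch of the patched `create()`; it is illustrative only and assumes the same WinForms/WebView2 bindings the module already imports.

```python
# Illustrative sketch only -- it relies on the toga_winforms runtime on
# Windows, so it is not runnable on its own. It shows the two lines the
# patch adds, in the context of WebView.create().
from toga_winforms.libs import Color


class WebView(Widget):  # Widget is toga_winforms.widgets.base.Widget
    def create(self):
        self.native = TogaWebBrowser(self.interface)
        # ... event handlers and CreationProperties wired up as before ...
        self.native.EnsureCoreWebView2Async(None)
        # Added by the patch: a transparent default background means no black
        # flash is painted behind the page while the control resizes.
        self.native.DefaultBackgroundColor = Color.Transparent
```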
| {"golden_diff": "diff --git a/winforms/src/toga_winforms/widgets/webview.py b/winforms/src/toga_winforms/widgets/webview.py\n--- a/winforms/src/toga_winforms/widgets/webview.py\n+++ b/winforms/src/toga_winforms/widgets/webview.py\n@@ -8,6 +8,7 @@\n from toga_winforms.keys import toga_key\n from toga_winforms.libs import (\n Action,\n+ Color,\n CoreWebView2CreationProperties,\n String,\n Task,\n@@ -43,6 +44,7 @@\n \n # Trigger the configuration of the webview\n self.native.EnsureCoreWebView2Async(None)\n+ self.native.DefaultBackgroundColor = Color.Transparent\n \n def winforms_initialization_completed(self, sender, args):\n # The WebView2 widget has an \"internal\" widget (CoreWebView2) that is\n", "issue": "Hide webview black background when resizing\n### Describe the bug\r\n\r\nBy default in Windows, a webview will render a black background behind it. When resizing the webview, this black background becomes visible for a couple of frames while the html rendered on top is still in the process of updating, resulting in black \"bars\" appearing at the side of the webview when resizing it.\r\n\r\nThis can look quite ugly\r\n\r\n### Steps to reproduce\r\n\r\nsee: https://youtu.be/Xj1rW4L-Pqk\r\n\r\n```py\r\n\"\"\"\r\nMy first application\r\n\"\"\"\r\nimport toga\r\nfrom toga.style import Pack\r\nfrom toga.style.pack import COLUMN, ROW\r\n\r\n\r\nclass HelloWorld(toga.App):\r\n\r\n def startup(self):\r\n \"\"\"\r\n Construct and show the Toga application.\r\n\r\n Usually, you would add your application to a main content box.\r\n We then create a main window (with a name matching the app), and\r\n show the main window.\r\n \"\"\"\r\n web = toga.WebView(url='https://google.com')\r\n self.main_window = toga.MainWindow(title=self.formal_name)\r\n self.main_window.content = web\r\n self.main_window.show()\r\n\r\n\r\n\r\n\r\ndef main():\r\n helloWorld = HelloWorld(\"foobar\",\r\n \"org.foo.bar\")\r\n return helloWorld\r\n\r\n\r\nmain().main_loop()\r\n```\r\n\r\n### Environment\r\n\r\n- Operating System:\r\n- Windows 11, winforms\r\n- Python version:\r\n- 3.10\r\n- Software versions:\r\n - Briefcase:\r\n - 0.3.12\r\n - Toga:\r\n - 0.3.0\r\n\r\n\n", "before_files": [{"content": "import traceback\nimport webbrowser\nfrom asyncio import get_event_loop\n\nfrom travertino.size import at_least\n\nimport toga\nfrom toga_winforms.keys import toga_key\nfrom toga_winforms.libs import (\n Action,\n CoreWebView2CreationProperties,\n String,\n Task,\n TaskScheduler,\n Uri,\n WebView2,\n WebView2RuntimeNotFoundException,\n WinForms,\n)\n\nfrom .base import Widget\n\n\nclass TogaWebBrowser(WebView2):\n def __init__(self, interface):\n super().__init__()\n self.interface = interface\n self._edge_runtime_available = None # Set to an unknown state initially\n\n\nclass WebView(Widget):\n def create(self):\n self.native = TogaWebBrowser(self.interface)\n self.native.CoreWebView2InitializationCompleted += (\n self.winforms_initialization_completed\n )\n self.native.NavigationCompleted += self.winforms_navigation_completed\n self.native.KeyDown += self.winforms_key_down\n\n props = CoreWebView2CreationProperties()\n props.UserDataFolder = str(toga.App.app.paths.cache / \"WebView2\")\n self.native.CreationProperties = props\n\n # Trigger the configuration of the webview\n self.native.EnsureCoreWebView2Async(None)\n\n def winforms_initialization_completed(self, sender, args):\n # The WebView2 widget has an \"internal\" widget (CoreWebView2) that is\n # the actual web view. 
The view isn't ready until the internal widget has\n # completed initialization, and that isn't done until an explicit\n # request is made (EnsureCoreWebView2Async).\n if args.IsSuccess:\n # We've initialized, so we must have the runtime\n self.native._edge_runtime_available = True\n try:\n settings = self.native.CoreWebView2.Settings\n\n debug = True\n settings.AreDefaultContextMenusEnabled = debug\n settings.AreDefaultScriptDialogsEnabled = True\n settings.AreDevToolsEnabled = debug\n settings.IsBuiltInErrorPageEnabled = True\n settings.IsScriptEnabled = True\n settings.IsWebMessageEnabled = True\n settings.IsStatusBarEnabled = debug\n settings.IsZoomControlEnabled = True\n\n self.set_user_agent(self.interface.user_agent)\n\n if self.interface._html_content:\n self.set_content(self.interface.url, self.interface._html_content)\n else:\n self.set_url(self.interface.url)\n\n except Exception:\n traceback.print_exc()\n else:\n if isinstance(\n args.InitializationException, WebView2RuntimeNotFoundException\n ):\n print(\"Could not find the Microsoft Edge WebView2 Runtime.\")\n if self.native._edge_runtime_available is None:\n # The initialize message is sent twice on failure.\n # We only want to show the dialog once, so track that we\n # know the runtime is missing.\n self.native._edge_runtime_available = False\n WinForms.MessageBox.Show(\n \"The Microsoft Edge WebView2 Runtime is not installed. \"\n \"Web content will not be displayed.\\n\\n\"\n \"Click OK to download the WebView2 Evergreen Runtime \"\n \"Bootstrapper from Microsoft.\",\n \"Missing Edge Webview2 runtime\",\n WinForms.MessageBoxButtons.OK,\n WinForms.MessageBoxIcon.Error,\n )\n webbrowser.open(\n \"https://developer.microsoft.com/en-us/microsoft-edge/webview2/#download-section\"\n )\n else:\n print(args.InitializationException)\n\n def winforms_navigation_completed(self, sender, args):\n if self.interface.on_webview_load:\n self.interface.on_webview_load(self.interface)\n\n def winforms_key_down(self, sender, args):\n if self.interface.on_key_down:\n self.interface.on_key_down(self.interface, **toga_key(args))\n\n def set_on_key_down(self, handler):\n pass\n\n def set_on_webview_load(self, handler):\n pass\n\n def get_url(self):\n return str(self.native.Source)\n\n def set_url(self, value):\n if value:\n self.native.Source = Uri(value)\n\n def set_content(self, root_url, content):\n if content and self.native.CoreWebView2:\n self.native.CoreWebView2.NavigateToString(content)\n\n def get_dom(self):\n self.interface.factory.not_implemented(\"WebView.get_dom()\")\n\n def set_user_agent(self, value):\n user_agent = (\n value\n if value\n else (\n \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 \"\n \"(KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36 Edg/90.0.818.46\"\n )\n )\n if self.native.CoreWebView2:\n self.native.CoreWebView2.Settings.UserAgent = user_agent\n\n async def evaluate_javascript(self, javascript):\n loop = get_event_loop()\n future = loop.create_future()\n\n task_scheduler = TaskScheduler.FromCurrentSynchronizationContext()\n try:\n\n def callback(task):\n future.set_result(task.Result)\n\n self.native.ExecuteScriptAsync(javascript).ContinueWith(\n Action[Task[String]](callback), task_scheduler\n )\n except Exception:\n traceback.print_exc()\n future.set_result(None)\n\n return await future\n\n def invoke_javascript(self, javascript):\n # The script will execute async, but you weren't going to get the result\n # anyway, so it doesn't really matter.\n 
self.native.ExecuteScriptAsync(javascript)\n\n def rehint(self):\n self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)\n self.interface.intrinsic.height = at_least(self.interface.MIN_HEIGHT)\n", "path": "winforms/src/toga_winforms/widgets/webview.py"}], "after_files": [{"content": "import traceback\nimport webbrowser\nfrom asyncio import get_event_loop\n\nfrom travertino.size import at_least\n\nimport toga\nfrom toga_winforms.keys import toga_key\nfrom toga_winforms.libs import (\n Action,\n Color,\n CoreWebView2CreationProperties,\n String,\n Task,\n TaskScheduler,\n Uri,\n WebView2,\n WebView2RuntimeNotFoundException,\n WinForms,\n)\n\nfrom .base import Widget\n\n\nclass TogaWebBrowser(WebView2):\n def __init__(self, interface):\n super().__init__()\n self.interface = interface\n self._edge_runtime_available = None # Set to an unknown state initially\n\n\nclass WebView(Widget):\n def create(self):\n self.native = TogaWebBrowser(self.interface)\n self.native.CoreWebView2InitializationCompleted += (\n self.winforms_initialization_completed\n )\n self.native.NavigationCompleted += self.winforms_navigation_completed\n self.native.KeyDown += self.winforms_key_down\n\n props = CoreWebView2CreationProperties()\n props.UserDataFolder = str(toga.App.app.paths.cache / \"WebView2\")\n self.native.CreationProperties = props\n\n # Trigger the configuration of the webview\n self.native.EnsureCoreWebView2Async(None)\n self.native.DefaultBackgroundColor = Color.Transparent\n\n def winforms_initialization_completed(self, sender, args):\n # The WebView2 widget has an \"internal\" widget (CoreWebView2) that is\n # the actual web view. The view isn't ready until the internal widget has\n # completed initialization, and that isn't done until an explicit\n # request is made (EnsureCoreWebView2Async).\n if args.IsSuccess:\n # We've initialized, so we must have the runtime\n self.native._edge_runtime_available = True\n try:\n settings = self.native.CoreWebView2.Settings\n\n debug = True\n settings.AreDefaultContextMenusEnabled = debug\n settings.AreDefaultScriptDialogsEnabled = True\n settings.AreDevToolsEnabled = debug\n settings.IsBuiltInErrorPageEnabled = True\n settings.IsScriptEnabled = True\n settings.IsWebMessageEnabled = True\n settings.IsStatusBarEnabled = debug\n settings.IsZoomControlEnabled = True\n\n self.set_user_agent(self.interface.user_agent)\n\n if self.interface._html_content:\n self.set_content(self.interface.url, self.interface._html_content)\n else:\n self.set_url(self.interface.url)\n\n except Exception:\n traceback.print_exc()\n else:\n if isinstance(\n args.InitializationException, WebView2RuntimeNotFoundException\n ):\n print(\"Could not find the Microsoft Edge WebView2 Runtime.\")\n if self.native._edge_runtime_available is None:\n # The initialize message is sent twice on failure.\n # We only want to show the dialog once, so track that we\n # know the runtime is missing.\n self.native._edge_runtime_available = False\n WinForms.MessageBox.Show(\n \"The Microsoft Edge WebView2 Runtime is not installed. 
\"\n \"Web content will not be displayed.\\n\\n\"\n \"Click OK to download the WebView2 Evergreen Runtime \"\n \"Bootstrapper from Microsoft.\",\n \"Missing Edge Webview2 runtime\",\n WinForms.MessageBoxButtons.OK,\n WinForms.MessageBoxIcon.Error,\n )\n webbrowser.open(\n \"https://developer.microsoft.com/en-us/microsoft-edge/webview2/#download-section\"\n )\n else:\n print(args.InitializationException)\n\n def winforms_navigation_completed(self, sender, args):\n if self.interface.on_webview_load:\n self.interface.on_webview_load(self.interface)\n\n def winforms_key_down(self, sender, args):\n if self.interface.on_key_down:\n self.interface.on_key_down(self.interface, **toga_key(args))\n\n def set_on_key_down(self, handler):\n pass\n\n def set_on_webview_load(self, handler):\n pass\n\n def get_url(self):\n return str(self.native.Source)\n\n def set_url(self, value):\n if value:\n self.native.Source = Uri(value)\n\n def set_content(self, root_url, content):\n if content and self.native.CoreWebView2:\n self.native.CoreWebView2.NavigateToString(content)\n\n def get_dom(self):\n self.interface.factory.not_implemented(\"WebView.get_dom()\")\n\n def set_user_agent(self, value):\n user_agent = (\n value\n if value\n else (\n \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 \"\n \"(KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36 Edg/90.0.818.46\"\n )\n )\n if self.native.CoreWebView2:\n self.native.CoreWebView2.Settings.UserAgent = user_agent\n\n async def evaluate_javascript(self, javascript):\n loop = get_event_loop()\n future = loop.create_future()\n\n task_scheduler = TaskScheduler.FromCurrentSynchronizationContext()\n try:\n\n def callback(task):\n future.set_result(task.Result)\n\n self.native.ExecuteScriptAsync(javascript).ContinueWith(\n Action[Task[String]](callback), task_scheduler\n )\n except Exception:\n traceback.print_exc()\n future.set_result(None)\n\n return await future\n\n def invoke_javascript(self, javascript):\n # The script will execute async, but you weren't going to get the result\n # anyway, so it doesn't really matter.\n self.native.ExecuteScriptAsync(javascript)\n\n def rehint(self):\n self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)\n self.interface.intrinsic.height = at_least(self.interface.MIN_HEIGHT)\n", "path": "winforms/src/toga_winforms/widgets/webview.py"}]} | 2,213 | 182 |
gh_patches_debug_39951 | rasdani/github-patches | git_diff | liqd__a4-opin-346 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Choose template: Small issues
There are some small wording issues when you choose a template to create a project in the dashboard. See the comments in the screenshot.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `euth/dashboard/templatetags/dashboard_templatetags.py`
Content:
```
1 from django import template
2
3 register = template.Library()
4
5
6 @register.simple_tag
7 def selected(request, pattern):
8 path = request.path
9 if path == pattern:
10 return 'selected'
11 return ''
12
```
Path: `euth/dashboard/urls.py`
Content:
```
1 from django.conf.urls import url
2
3 from . import views
4
5 urlpatterns = [
6 url(
7 r'^$',
8 views.dashboard,
9 name='dashboard'),
10 url(
11 r'^profile$',
12 views.DashboardProfileView.as_view(),
13 name='dashboard-profile'),
14 url(
15 r'^email/$',
16 views.DashboardEmailView.as_view(),
17 name='dashboard-email'
18 ),
19 url(
20 r'^connections/$',
21 views.DashboardAccountView.as_view(),
22 name='dashboard-connections'
23 ),
24 url(
25 r'^(?P<organisation_slug>[-\w_]+)/$',
26 views.DashboardOrganisationUpdateView.as_view(),
27 name='dashboard-organisation-edit'
28 ),
29 url(
30 r'^(?P<organisation_slug>[-\w_]+)/projects/$',
31 views.DashboardProjectListView.as_view(),
32 name='dashboard-project-list'),
33 url(r'^(?P<organisation_slug>[-\w_]+)/blueprints/$',
34 views.DashboardBlueprintListView.as_view(),
35 name='dashboard-blueprint-list'),
36 url(r'^(?P<organisation_slug>[-\w_]+)/blueprints/'
37 r'(?P<blueprint_slug>[-\w_]+)/$',
38 views.DashboardProjectCreateView.as_view(),
39 name='dashboard-project-create'),
40 url(
41 r'^(?P<organisation_slug>[-\w_]+)/projects/(?P<slug>[-\w_]+)/$',
42 views.DashboardProjectUpdateView.as_view(),
43 name='dashboard-project-edit'
44 ),
45 url(
46 r'^(?P<organisation_slug>[-\w_]+)/projects/(?P<slug>[-\w_]+)/users$',
47 views.DashboardProjectUserView.as_view(),
48 name='dashboard-project-users'
49 ),
50 url(
51 r'^(?P<organisation_slug>[-\w_]+)/projects/'
52 r'(?P<slug>[-\w_]+)/users/invite$',
53 views.DashboardProjectInviteView.as_view(),
54 name='dashboard-project-invite'
55 ),
56 ]
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/euth/dashboard/templatetags/dashboard_templatetags.py b/euth/dashboard/templatetags/dashboard_templatetags.py
deleted file mode 100644
--- a/euth/dashboard/templatetags/dashboard_templatetags.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from django import template
-
-register = template.Library()
-
-
[email protected]_tag
-def selected(request, pattern):
- path = request.path
- if path == pattern:
- return 'selected'
- return ''
diff --git a/euth/dashboard/urls.py b/euth/dashboard/urls.py
--- a/euth/dashboard/urls.py
+++ b/euth/dashboard/urls.py
@@ -10,47 +10,57 @@
url(
r'^profile$',
views.DashboardProfileView.as_view(),
+ {'dashboard_menu_item': 'profile'},
name='dashboard-profile'),
url(
r'^email/$',
views.DashboardEmailView.as_view(),
+ {'dashboard_menu_item': 'email'},
name='dashboard-email'
),
url(
r'^connections/$',
views.DashboardAccountView.as_view(),
+ {'dashboard_menu_item': 'connections'},
name='dashboard-connections'
),
url(
r'^(?P<organisation_slug>[-\w_]+)/$',
views.DashboardOrganisationUpdateView.as_view(),
+ {'dashboard_menu_item': 'organisation'},
name='dashboard-organisation-edit'
),
url(
r'^(?P<organisation_slug>[-\w_]+)/projects/$',
views.DashboardProjectListView.as_view(),
+ {'dashboard_menu_item': 'project'},
name='dashboard-project-list'),
url(r'^(?P<organisation_slug>[-\w_]+)/blueprints/$',
views.DashboardBlueprintListView.as_view(),
+ {'dashboard_menu_item': 'project'},
name='dashboard-blueprint-list'),
url(r'^(?P<organisation_slug>[-\w_]+)/blueprints/'
r'(?P<blueprint_slug>[-\w_]+)/$',
views.DashboardProjectCreateView.as_view(),
+ {'dashboard_menu_item': 'project'},
name='dashboard-project-create'),
url(
r'^(?P<organisation_slug>[-\w_]+)/projects/(?P<slug>[-\w_]+)/$',
views.DashboardProjectUpdateView.as_view(),
+ {'dashboard_menu_item': 'project'},
name='dashboard-project-edit'
),
url(
r'^(?P<organisation_slug>[-\w_]+)/projects/(?P<slug>[-\w_]+)/users$',
views.DashboardProjectUserView.as_view(),
+ {'dashboard_menu_item': 'project'},
name='dashboard-project-users'
),
url(
r'^(?P<organisation_slug>[-\w_]+)/projects/'
r'(?P<slug>[-\w_]+)/users/invite$',
views.DashboardProjectInviteView.as_view(),
+ {'dashboard_menu_item': 'project'},
name='dashboard-project-invite'
),
]
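The patch deletes the `selected` template tag (which compared `request.path` against a hard-coded pattern) and instead passes a `dashboard_menu_item` value as extra keyword arguments on each route, using the third positional argument to Django's `url()`. One plausible way templates can consume that value is via `request.resolver_match`; the sketch below is a hypothetical context processor for illustration, not code from the project.

```python
# Hypothetical helper (not part of the patch): expose the active dashboard
# menu item to templates so they can render the "selected" state.
def dashboard_menu_item(request):
    match = request.resolver_match  # set by Django after URL resolution
    return {
        "dashboard_menu_item": match.kwargs.get("dashboard_menu_item")
        if match
        else None,
    }

# A template can then use a plain comparison, e.g.
#   {% if dashboard_menu_item == 'project' %}class="selected"{% endif %}
# instead of matching the request path as the removed template tag did.
```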
| {"golden_diff": "diff --git a/euth/dashboard/templatetags/dashboard_templatetags.py b/euth/dashboard/templatetags/dashboard_templatetags.py\ndeleted file mode 100644\n--- a/euth/dashboard/templatetags/dashboard_templatetags.py\n+++ /dev/null\n@@ -1,11 +0,0 @@\n-from django import template\n-\n-register = template.Library()\n-\n-\[email protected]_tag\n-def selected(request, pattern):\n- path = request.path\n- if path == pattern:\n- return 'selected'\n- return ''\ndiff --git a/euth/dashboard/urls.py b/euth/dashboard/urls.py\n--- a/euth/dashboard/urls.py\n+++ b/euth/dashboard/urls.py\n@@ -10,47 +10,57 @@\n url(\n r'^profile$',\n views.DashboardProfileView.as_view(),\n+ {'dashboard_menu_item': 'profile'},\n name='dashboard-profile'),\n url(\n r'^email/$',\n views.DashboardEmailView.as_view(),\n+ {'dashboard_menu_item': 'email'},\n name='dashboard-email'\n ),\n url(\n r'^connections/$',\n views.DashboardAccountView.as_view(),\n+ {'dashboard_menu_item': 'connections'},\n name='dashboard-connections'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/$',\n views.DashboardOrganisationUpdateView.as_view(),\n+ {'dashboard_menu_item': 'organisation'},\n name='dashboard-organisation-edit'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/$',\n views.DashboardProjectListView.as_view(),\n+ {'dashboard_menu_item': 'project'},\n name='dashboard-project-list'),\n url(r'^(?P<organisation_slug>[-\\w_]+)/blueprints/$',\n views.DashboardBlueprintListView.as_view(),\n+ {'dashboard_menu_item': 'project'},\n name='dashboard-blueprint-list'),\n url(r'^(?P<organisation_slug>[-\\w_]+)/blueprints/'\n r'(?P<blueprint_slug>[-\\w_]+)/$',\n views.DashboardProjectCreateView.as_view(),\n+ {'dashboard_menu_item': 'project'},\n name='dashboard-project-create'),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/(?P<slug>[-\\w_]+)/$',\n views.DashboardProjectUpdateView.as_view(),\n+ {'dashboard_menu_item': 'project'},\n name='dashboard-project-edit'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/(?P<slug>[-\\w_]+)/users$',\n views.DashboardProjectUserView.as_view(),\n+ {'dashboard_menu_item': 'project'},\n name='dashboard-project-users'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/'\n r'(?P<slug>[-\\w_]+)/users/invite$',\n views.DashboardProjectInviteView.as_view(),\n+ {'dashboard_menu_item': 'project'},\n name='dashboard-project-invite'\n ),\n ]\n", "issue": "Choose template: Small issues\nThere are some small wording issues when you choose a template to create a project in the dashboard. See comments in screenshot.\n\n\n\n", "before_files": [{"content": "from django import template\n\nregister = template.Library()\n\n\[email protected]_tag\ndef selected(request, pattern):\n path = request.path\n if path == pattern:\n return 'selected'\n return ''\n", "path": "euth/dashboard/templatetags/dashboard_templatetags.py"}, {"content": "from django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n url(\n r'^$',\n views.dashboard,\n name='dashboard'),\n url(\n r'^profile$',\n views.DashboardProfileView.as_view(),\n name='dashboard-profile'),\n url(\n r'^email/$',\n views.DashboardEmailView.as_view(),\n name='dashboard-email'\n ),\n url(\n r'^connections/$',\n views.DashboardAccountView.as_view(),\n name='dashboard-connections'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/$',\n views.DashboardOrganisationUpdateView.as_view(),\n name='dashboard-organisation-edit'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/$',\n views.DashboardProjectListView.as_view(),\n name='dashboard-project-list'),\n url(r'^(?P<organisation_slug>[-\\w_]+)/blueprints/$',\n views.DashboardBlueprintListView.as_view(),\n name='dashboard-blueprint-list'),\n url(r'^(?P<organisation_slug>[-\\w_]+)/blueprints/'\n r'(?P<blueprint_slug>[-\\w_]+)/$',\n views.DashboardProjectCreateView.as_view(),\n name='dashboard-project-create'),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/(?P<slug>[-\\w_]+)/$',\n views.DashboardProjectUpdateView.as_view(),\n name='dashboard-project-edit'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/(?P<slug>[-\\w_]+)/users$',\n views.DashboardProjectUserView.as_view(),\n name='dashboard-project-users'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/'\n r'(?P<slug>[-\\w_]+)/users/invite$',\n views.DashboardProjectInviteView.as_view(),\n name='dashboard-project-invite'\n ),\n]\n", "path": "euth/dashboard/urls.py"}], "after_files": [{"content": null, "path": "euth/dashboard/templatetags/dashboard_templatetags.py"}, {"content": "from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(\n r'^$',\n views.dashboard,\n name='dashboard'),\n url(\n r'^profile$',\n views.DashboardProfileView.as_view(),\n {'dashboard_menu_item': 'profile'},\n name='dashboard-profile'),\n url(\n r'^email/$',\n views.DashboardEmailView.as_view(),\n {'dashboard_menu_item': 'email'},\n name='dashboard-email'\n ),\n url(\n r'^connections/$',\n views.DashboardAccountView.as_view(),\n {'dashboard_menu_item': 'connections'},\n name='dashboard-connections'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/$',\n views.DashboardOrganisationUpdateView.as_view(),\n {'dashboard_menu_item': 'organisation'},\n name='dashboard-organisation-edit'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/$',\n views.DashboardProjectListView.as_view(),\n {'dashboard_menu_item': 'project'},\n name='dashboard-project-list'),\n url(r'^(?P<organisation_slug>[-\\w_]+)/blueprints/$',\n views.DashboardBlueprintListView.as_view(),\n {'dashboard_menu_item': 'project'},\n name='dashboard-blueprint-list'),\n url(r'^(?P<organisation_slug>[-\\w_]+)/blueprints/'\n r'(?P<blueprint_slug>[-\\w_]+)/$',\n views.DashboardProjectCreateView.as_view(),\n {'dashboard_menu_item': 'project'},\n name='dashboard-project-create'),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/(?P<slug>[-\\w_]+)/$',\n views.DashboardProjectUpdateView.as_view(),\n {'dashboard_menu_item': 'project'},\n name='dashboard-project-edit'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/(?P<slug>[-\\w_]+)/users$',\n views.DashboardProjectUserView.as_view(),\n {'dashboard_menu_item': 'project'},\n name='dashboard-project-users'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/'\n r'(?P<slug>[-\\w_]+)/users/invite$',\n views.DashboardProjectInviteView.as_view(),\n {'dashboard_menu_item': 'project'},\n name='dashboard-project-invite'\n ),\n]\n", "path": "euth/dashboard/urls.py"}]} | 
986 | 685 |
gh_patches_debug_5112 | rasdani/github-patches | git_diff | ray-project__ray-6233 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Kubernetes Docker Container untagged
The container used by the Kubernetes manifests (rayproject/autoscaler) doesn't have any useful tags. Furthermore, the contents of the container are actually built from github.com/edoakes/ray, and the container is two months old.
It might be better to tag the containers with proper versions and ensure they are built from the main repo (by Travis) so that there aren't small differences in the code and the images stay up to date.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/setup.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4
5 import glob
6 import os
7 import re
8 import shutil
9 import subprocess
10 import sys
11
12 from setuptools import setup, find_packages, Distribution
13 import setuptools.command.build_ext as _build_ext
14
15 # Ideally, we could include these files by putting them in a
16 # MANIFEST.in or using the package_data argument to setup, but the
17 # MANIFEST.in gets applied at the very beginning when setup.py runs
18 # before these files have been created, so we have to move the files
19 # manually.
20
21 # NOTE: The lists below must be kept in sync with ray/BUILD.bazel.
22
23 ray_files = [
24 "ray/core/src/ray/thirdparty/redis/src/redis-server",
25 "ray/core/src/ray/gcs/redis_module/libray_redis_module.so",
26 "ray/core/src/plasma/plasma_store_server",
27 "ray/_raylet.so",
28 "ray/core/src/ray/raylet/raylet_monitor",
29 "ray/core/src/ray/raylet/raylet",
30 "ray/dashboard/dashboard.py",
31 ]
32
33 # These are the directories where automatically generated Python protobuf
34 # bindings are created.
35 generated_python_directories = [
36 "ray/core/generated",
37 ]
38
39 optional_ray_files = []
40
41 ray_autoscaler_files = [
42 "ray/autoscaler/aws/example-full.yaml",
43 "ray/autoscaler/gcp/example-full.yaml",
44 "ray/autoscaler/local/example-full.yaml",
45 "ray/autoscaler/kubernetes/example-full.yaml",
46 "ray/autoscaler/kubernetes/kubectl-rsync.sh",
47 ]
48
49 ray_project_files = [
50 "ray/projects/schema.json", "ray/projects/templates/cluster_template.yaml",
51 "ray/projects/templates/project_template.yaml",
52 "ray/projects/templates/requirements.txt"
53 ]
54
55 ray_dashboard_files = [
56 "ray/dashboard/client/build/favicon.ico",
57 "ray/dashboard/client/build/index.html",
58 ]
59 for dirname in ["css", "js", "media"]:
60 ray_dashboard_files += glob.glob(
61 "ray/dashboard/client/build/static/{}/*".format(dirname))
62
63 optional_ray_files += ray_autoscaler_files
64 optional_ray_files += ray_project_files
65 optional_ray_files += ray_dashboard_files
66
67 if "RAY_USE_NEW_GCS" in os.environ and os.environ["RAY_USE_NEW_GCS"] == "on":
68 ray_files += [
69 "ray/core/src/credis/build/src/libmember.so",
70 "ray/core/src/credis/build/src/libmaster.so",
71 "ray/core/src/credis/redis/src/redis-server"
72 ]
73
74 extras = {
75 "rllib": [
76 "pyyaml", "gym[atari]", "opencv-python-headless", "lz4", "scipy",
77 "tabulate"
78 ],
79 "debug": ["psutil", "setproctitle", "py-spy >= 0.2.0"],
80 "dashboard": ["aiohttp", "google", "grpcio", "psutil", "setproctitle"],
81 "serve": ["uvicorn", "pygments", "werkzeug", "flask", "pandas"],
82 "tune": ["tabulate"],
83 }
84
85
86 class build_ext(_build_ext.build_ext):
87 def run(self):
88 # Note: We are passing in sys.executable so that we use the same
89 # version of Python to build pyarrow inside the build.sh script. Note
90 # that certain flags will not be passed along such as --user or sudo.
91 # TODO(rkn): Fix this.
92 command = ["../build.sh", "-p", sys.executable]
93 if os.getenv("RAY_INSTALL_JAVA") == "1":
94 # Also build binaries for Java if the above env variable exists.
95 command += ["-l", "python,java"]
96 subprocess.check_call(command)
97
98 # We also need to install pyarrow along with Ray, so make sure that the
99 # relevant non-Python pyarrow files get copied.
100 pyarrow_files = []
101 for (root, dirs, filenames) in os.walk("./ray/pyarrow_files/pyarrow"):
102 for name in filenames:
103 pyarrow_files.append(os.path.join(root, name))
104
105 # We also need to install pickle5 along with Ray, so make sure that the
106 # relevant non-Python pickle5 files get copied.
107 pickle5_files = []
108 for (root, dirs, filenames) in os.walk("./ray/pickle5_files/pickle5"):
109 for name in filenames:
110 pickle5_files.append(os.path.join(root, name))
111
112 files_to_include = ray_files + pyarrow_files + pickle5_files
113
114 # Copy over the autogenerated protobuf Python bindings.
115 for directory in generated_python_directories:
116 for filename in os.listdir(directory):
117 if filename[-3:] == ".py":
118 files_to_include.append(os.path.join(directory, filename))
119
120 for filename in files_to_include:
121 self.move_file(filename)
122
123 # Try to copy over the optional files.
124 for filename in optional_ray_files:
125 try:
126 self.move_file(filename)
127 except Exception:
128 print("Failed to copy optional file {}. This is ok."
129 .format(filename))
130
131 def move_file(self, filename):
132 # TODO(rkn): This feels very brittle. It may not handle all cases. See
133 # https://github.com/apache/arrow/blob/master/python/setup.py for an
134 # example.
135 source = filename
136 destination = os.path.join(self.build_lib, filename)
137 # Create the target directory if it doesn't already exist.
138 parent_directory = os.path.dirname(destination)
139 if not os.path.exists(parent_directory):
140 os.makedirs(parent_directory)
141 if not os.path.exists(destination):
142 print("Copying {} to {}.".format(source, destination))
143 shutil.copy(source, destination)
144
145
146 class BinaryDistribution(Distribution):
147 def has_ext_modules(self):
148 return True
149
150
151 def find_version(*filepath):
152 # Extract version information from filepath
153 here = os.path.abspath(os.path.dirname(__file__))
154 with open(os.path.join(here, *filepath)) as fp:
155 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
156 fp.read(), re.M)
157 if version_match:
158 return version_match.group(1)
159 raise RuntimeError("Unable to find version string.")
160
161
162 requires = [
163 "numpy >= 1.14",
164 "filelock",
165 "jsonschema",
166 "funcsigs",
167 "click",
168 "colorama",
169 "pytest",
170 "pyyaml",
171 "redis>=3.3.2",
172 # NOTE: Don't upgrade the version of six! Doing so causes installation
173 # problems. See https://github.com/ray-project/ray/issues/4169.
174 "six >= 1.0.0",
175 "faulthandler;python_version<'3.3'",
176 "protobuf >= 3.8.0",
177 ]
178
179 setup(
180 name="ray",
181 version=find_version("ray", "__init__.py"),
182 author="Ray Team",
183 author_email="[email protected]",
184 description=("A system for parallel and distributed Python that unifies "
185 "the ML ecosystem."),
186 long_description=open("../README.rst").read(),
187 url="https://github.com/ray-project/ray",
188 keywords=("ray distributed parallel machine-learning "
189 "reinforcement-learning deep-learning python"),
190 packages=find_packages(),
191 cmdclass={"build_ext": build_ext},
192 # The BinaryDistribution argument triggers build_ext.
193 distclass=BinaryDistribution,
194 install_requires=requires,
195 setup_requires=["cython >= 0.29"],
196 extras_require=extras,
197 entry_points={
198 "console_scripts": [
199 "ray=ray.scripts.scripts:main",
200 "rllib=ray.rllib.scripts:cli [rllib]", "tune=ray.tune.scripts:cli"
201 ]
202 },
203 include_package_data=True,
204 zip_safe=False,
205 license="Apache 2.0")
206
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/setup.py b/python/setup.py
--- a/python/setup.py
+++ b/python/setup.py
@@ -3,6 +3,7 @@
from __future__ import print_function
import glob
+from itertools import chain
import os
import re
import shutil
@@ -82,6 +83,8 @@
"tune": ["tabulate"],
}
+extras["all"] = list(set(chain.from_iterable(extras.values())))
+
class build_ext(_build_ext.build_ext):
def run(self):
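Note that the recorded patch touches only `python/setup.py`: it imports `chain` and defines an aggregate `all` extra that is the union of every optional dependency group. The standalone snippet below reproduces that one-liner on a toy dict to show exactly what it computes (so that, for example, `pip install ray[all]` would pull in every extra).

```python
# Standalone illustration of the added line, on a toy extras dict.
from itertools import chain

extras = {
    "debug": ["psutil", "setproctitle"],
    "tune": ["tabulate"],
    "serve": ["flask", "pandas"],
}
# Flatten all the optional-dependency lists and de-duplicate them.
extras["all"] = list(set(chain.from_iterable(extras.values())))
print(sorted(extras["all"]))
# -> ['flask', 'pandas', 'psutil', 'setproctitle', 'tabulate']
```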
| {"golden_diff": "diff --git a/python/setup.py b/python/setup.py\n--- a/python/setup.py\n+++ b/python/setup.py\n@@ -3,6 +3,7 @@\n from __future__ import print_function\n \n import glob\n+from itertools import chain\n import os\n import re\n import shutil\n@@ -82,6 +83,8 @@\n \"tune\": [\"tabulate\"],\n }\n \n+extras[\"all\"] = list(set(chain.from_iterable(extras.values())))\n+\n \n class build_ext(_build_ext.build_ext):\n def run(self):\n", "issue": "Kubernetes Docker Container untagged\nThe container used by the kubernetes manifests (rayproject/autoscaler) doesn't have any useful tags. Furthermore the contents of the container are actually built using github.com/edoakes/ray and the container is two months old.\r\n\r\nIt might be better to properly tag the containers with versions and ensure they are built from the main repo (by travis) so that there aren't small differences in the code and it stays up to date.\r\n\r\n\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport glob\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup, find_packages, Distribution\nimport setuptools.command.build_ext as _build_ext\n\n# Ideally, we could include these files by putting them in a\n# MANIFEST.in or using the package_data argument to setup, but the\n# MANIFEST.in gets applied at the very beginning when setup.py runs\n# before these files have been created, so we have to move the files\n# manually.\n\n# NOTE: The lists below must be kept in sync with ray/BUILD.bazel.\n\nray_files = [\n \"ray/core/src/ray/thirdparty/redis/src/redis-server\",\n \"ray/core/src/ray/gcs/redis_module/libray_redis_module.so\",\n \"ray/core/src/plasma/plasma_store_server\",\n \"ray/_raylet.so\",\n \"ray/core/src/ray/raylet/raylet_monitor\",\n \"ray/core/src/ray/raylet/raylet\",\n \"ray/dashboard/dashboard.py\",\n]\n\n# These are the directories where automatically generated Python protobuf\n# bindings are created.\ngenerated_python_directories = [\n \"ray/core/generated\",\n]\n\noptional_ray_files = []\n\nray_autoscaler_files = [\n \"ray/autoscaler/aws/example-full.yaml\",\n \"ray/autoscaler/gcp/example-full.yaml\",\n \"ray/autoscaler/local/example-full.yaml\",\n \"ray/autoscaler/kubernetes/example-full.yaml\",\n \"ray/autoscaler/kubernetes/kubectl-rsync.sh\",\n]\n\nray_project_files = [\n \"ray/projects/schema.json\", \"ray/projects/templates/cluster_template.yaml\",\n \"ray/projects/templates/project_template.yaml\",\n \"ray/projects/templates/requirements.txt\"\n]\n\nray_dashboard_files = [\n \"ray/dashboard/client/build/favicon.ico\",\n \"ray/dashboard/client/build/index.html\",\n]\nfor dirname in [\"css\", \"js\", \"media\"]:\n ray_dashboard_files += glob.glob(\n \"ray/dashboard/client/build/static/{}/*\".format(dirname))\n\noptional_ray_files += ray_autoscaler_files\noptional_ray_files += ray_project_files\noptional_ray_files += ray_dashboard_files\n\nif \"RAY_USE_NEW_GCS\" in os.environ and os.environ[\"RAY_USE_NEW_GCS\"] == \"on\":\n ray_files += [\n \"ray/core/src/credis/build/src/libmember.so\",\n \"ray/core/src/credis/build/src/libmaster.so\",\n \"ray/core/src/credis/redis/src/redis-server\"\n ]\n\nextras = {\n \"rllib\": [\n \"pyyaml\", \"gym[atari]\", \"opencv-python-headless\", \"lz4\", \"scipy\",\n \"tabulate\"\n ],\n \"debug\": [\"psutil\", \"setproctitle\", \"py-spy >= 0.2.0\"],\n \"dashboard\": [\"aiohttp\", \"google\", \"grpcio\", \"psutil\", 
\"setproctitle\"],\n \"serve\": [\"uvicorn\", \"pygments\", \"werkzeug\", \"flask\", \"pandas\"],\n \"tune\": [\"tabulate\"],\n}\n\n\nclass build_ext(_build_ext.build_ext):\n def run(self):\n # Note: We are passing in sys.executable so that we use the same\n # version of Python to build pyarrow inside the build.sh script. Note\n # that certain flags will not be passed along such as --user or sudo.\n # TODO(rkn): Fix this.\n command = [\"../build.sh\", \"-p\", sys.executable]\n if os.getenv(\"RAY_INSTALL_JAVA\") == \"1\":\n # Also build binaries for Java if the above env variable exists.\n command += [\"-l\", \"python,java\"]\n subprocess.check_call(command)\n\n # We also need to install pyarrow along with Ray, so make sure that the\n # relevant non-Python pyarrow files get copied.\n pyarrow_files = []\n for (root, dirs, filenames) in os.walk(\"./ray/pyarrow_files/pyarrow\"):\n for name in filenames:\n pyarrow_files.append(os.path.join(root, name))\n\n # We also need to install pickle5 along with Ray, so make sure that the\n # relevant non-Python pickle5 files get copied.\n pickle5_files = []\n for (root, dirs, filenames) in os.walk(\"./ray/pickle5_files/pickle5\"):\n for name in filenames:\n pickle5_files.append(os.path.join(root, name))\n\n files_to_include = ray_files + pyarrow_files + pickle5_files\n\n # Copy over the autogenerated protobuf Python bindings.\n for directory in generated_python_directories:\n for filename in os.listdir(directory):\n if filename[-3:] == \".py\":\n files_to_include.append(os.path.join(directory, filename))\n\n for filename in files_to_include:\n self.move_file(filename)\n\n # Try to copy over the optional files.\n for filename in optional_ray_files:\n try:\n self.move_file(filename)\n except Exception:\n print(\"Failed to copy optional file {}. This is ok.\"\n .format(filename))\n\n def move_file(self, filename):\n # TODO(rkn): This feels very brittle. It may not handle all cases. See\n # https://github.com/apache/arrow/blob/master/python/setup.py for an\n # example.\n source = filename\n destination = os.path.join(self.build_lib, filename)\n # Create the target directory if it doesn't already exist.\n parent_directory = os.path.dirname(destination)\n if not os.path.exists(parent_directory):\n os.makedirs(parent_directory)\n if not os.path.exists(destination):\n print(\"Copying {} to {}.\".format(source, destination))\n shutil.copy(source, destination)\n\n\nclass BinaryDistribution(Distribution):\n def has_ext_modules(self):\n return True\n\n\ndef find_version(*filepath):\n # Extract version information from filepath\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, *filepath)) as fp:\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n fp.read(), re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nrequires = [\n \"numpy >= 1.14\",\n \"filelock\",\n \"jsonschema\",\n \"funcsigs\",\n \"click\",\n \"colorama\",\n \"pytest\",\n \"pyyaml\",\n \"redis>=3.3.2\",\n # NOTE: Don't upgrade the version of six! Doing so causes installation\n # problems. 
See https://github.com/ray-project/ray/issues/4169.\n \"six >= 1.0.0\",\n \"faulthandler;python_version<'3.3'\",\n \"protobuf >= 3.8.0\",\n]\n\nsetup(\n name=\"ray\",\n version=find_version(\"ray\", \"__init__.py\"),\n author=\"Ray Team\",\n author_email=\"[email protected]\",\n description=(\"A system for parallel and distributed Python that unifies \"\n \"the ML ecosystem.\"),\n long_description=open(\"../README.rst\").read(),\n url=\"https://github.com/ray-project/ray\",\n keywords=(\"ray distributed parallel machine-learning \"\n \"reinforcement-learning deep-learning python\"),\n packages=find_packages(),\n cmdclass={\"build_ext\": build_ext},\n # The BinaryDistribution argument triggers build_ext.\n distclass=BinaryDistribution,\n install_requires=requires,\n setup_requires=[\"cython >= 0.29\"],\n extras_require=extras,\n entry_points={\n \"console_scripts\": [\n \"ray=ray.scripts.scripts:main\",\n \"rllib=ray.rllib.scripts:cli [rllib]\", \"tune=ray.tune.scripts:cli\"\n ]\n },\n include_package_data=True,\n zip_safe=False,\n license=\"Apache 2.0\")\n", "path": "python/setup.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport glob\nfrom itertools import chain\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup, find_packages, Distribution\nimport setuptools.command.build_ext as _build_ext\n\n# Ideally, we could include these files by putting them in a\n# MANIFEST.in or using the package_data argument to setup, but the\n# MANIFEST.in gets applied at the very beginning when setup.py runs\n# before these files have been created, so we have to move the files\n# manually.\n\n# NOTE: The lists below must be kept in sync with ray/BUILD.bazel.\n\nray_files = [\n \"ray/core/src/ray/thirdparty/redis/src/redis-server\",\n \"ray/core/src/ray/gcs/redis_module/libray_redis_module.so\",\n \"ray/core/src/plasma/plasma_store_server\",\n \"ray/_raylet.so\",\n \"ray/core/src/ray/raylet/raylet_monitor\",\n \"ray/core/src/ray/raylet/raylet\",\n \"ray/dashboard/dashboard.py\",\n]\n\n# These are the directories where automatically generated Python protobuf\n# bindings are created.\ngenerated_python_directories = [\n \"ray/core/generated\",\n]\n\noptional_ray_files = []\n\nray_autoscaler_files = [\n \"ray/autoscaler/aws/example-full.yaml\",\n \"ray/autoscaler/gcp/example-full.yaml\",\n \"ray/autoscaler/local/example-full.yaml\",\n \"ray/autoscaler/kubernetes/example-full.yaml\",\n \"ray/autoscaler/kubernetes/kubectl-rsync.sh\",\n]\n\nray_project_files = [\n \"ray/projects/schema.json\", \"ray/projects/templates/cluster_template.yaml\",\n \"ray/projects/templates/project_template.yaml\",\n \"ray/projects/templates/requirements.txt\"\n]\n\nray_dashboard_files = [\n \"ray/dashboard/client/build/favicon.ico\",\n \"ray/dashboard/client/build/index.html\",\n]\nfor dirname in [\"css\", \"js\", \"media\"]:\n ray_dashboard_files += glob.glob(\n \"ray/dashboard/client/build/static/{}/*\".format(dirname))\n\noptional_ray_files += ray_autoscaler_files\noptional_ray_files += ray_project_files\noptional_ray_files += ray_dashboard_files\n\nif \"RAY_USE_NEW_GCS\" in os.environ and os.environ[\"RAY_USE_NEW_GCS\"] == \"on\":\n ray_files += [\n \"ray/core/src/credis/build/src/libmember.so\",\n \"ray/core/src/credis/build/src/libmaster.so\",\n \"ray/core/src/credis/redis/src/redis-server\"\n ]\n\nextras = {\n \"rllib\": [\n \"pyyaml\", \"gym[atari]\", 
\"opencv-python-headless\", \"lz4\", \"scipy\",\n \"tabulate\"\n ],\n \"debug\": [\"psutil\", \"setproctitle\", \"py-spy >= 0.2.0\"],\n \"dashboard\": [\"aiohttp\", \"google\", \"grpcio\", \"psutil\", \"setproctitle\"],\n \"serve\": [\"uvicorn\", \"pygments\", \"werkzeug\", \"flask\", \"pandas\"],\n \"tune\": [\"tabulate\"],\n}\n\nextras[\"all\"] = list(set(chain.from_iterable(extras.values())))\n\n\nclass build_ext(_build_ext.build_ext):\n def run(self):\n # Note: We are passing in sys.executable so that we use the same\n # version of Python to build pyarrow inside the build.sh script. Note\n # that certain flags will not be passed along such as --user or sudo.\n # TODO(rkn): Fix this.\n command = [\"../build.sh\", \"-p\", sys.executable]\n if os.getenv(\"RAY_INSTALL_JAVA\") == \"1\":\n # Also build binaries for Java if the above env variable exists.\n command += [\"-l\", \"python,java\"]\n subprocess.check_call(command)\n\n # We also need to install pyarrow along with Ray, so make sure that the\n # relevant non-Python pyarrow files get copied.\n pyarrow_files = []\n for (root, dirs, filenames) in os.walk(\"./ray/pyarrow_files/pyarrow\"):\n for name in filenames:\n pyarrow_files.append(os.path.join(root, name))\n\n # We also need to install pickle5 along with Ray, so make sure that the\n # relevant non-Python pickle5 files get copied.\n pickle5_files = []\n for (root, dirs, filenames) in os.walk(\"./ray/pickle5_files/pickle5\"):\n for name in filenames:\n pickle5_files.append(os.path.join(root, name))\n\n files_to_include = ray_files + pyarrow_files + pickle5_files\n\n # Copy over the autogenerated protobuf Python bindings.\n for directory in generated_python_directories:\n for filename in os.listdir(directory):\n if filename[-3:] == \".py\":\n files_to_include.append(os.path.join(directory, filename))\n\n for filename in files_to_include:\n self.move_file(filename)\n\n # Try to copy over the optional files.\n for filename in optional_ray_files:\n try:\n self.move_file(filename)\n except Exception:\n print(\"Failed to copy optional file {}. This is ok.\"\n .format(filename))\n\n def move_file(self, filename):\n # TODO(rkn): This feels very brittle. It may not handle all cases. See\n # https://github.com/apache/arrow/blob/master/python/setup.py for an\n # example.\n source = filename\n destination = os.path.join(self.build_lib, filename)\n # Create the target directory if it doesn't already exist.\n parent_directory = os.path.dirname(destination)\n if not os.path.exists(parent_directory):\n os.makedirs(parent_directory)\n if not os.path.exists(destination):\n print(\"Copying {} to {}.\".format(source, destination))\n shutil.copy(source, destination)\n\n\nclass BinaryDistribution(Distribution):\n def has_ext_modules(self):\n return True\n\n\ndef find_version(*filepath):\n # Extract version information from filepath\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, *filepath)) as fp:\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n fp.read(), re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nrequires = [\n \"numpy >= 1.14\",\n \"filelock\",\n \"jsonschema\",\n \"funcsigs\",\n \"click\",\n \"colorama\",\n \"pytest\",\n \"pyyaml\",\n \"redis>=3.3.2\",\n # NOTE: Don't upgrade the version of six! Doing so causes installation\n # problems. 
See https://github.com/ray-project/ray/issues/4169.\n \"six >= 1.0.0\",\n \"faulthandler;python_version<'3.3'\",\n \"protobuf >= 3.8.0\",\n]\n\nsetup(\n name=\"ray\",\n version=find_version(\"ray\", \"__init__.py\"),\n author=\"Ray Team\",\n author_email=\"[email protected]\",\n description=(\"A system for parallel and distributed Python that unifies \"\n \"the ML ecosystem.\"),\n long_description=open(\"../README.rst\").read(),\n url=\"https://github.com/ray-project/ray\",\n keywords=(\"ray distributed parallel machine-learning \"\n \"reinforcement-learning deep-learning python\"),\n packages=find_packages(),\n cmdclass={\"build_ext\": build_ext},\n # The BinaryDistribution argument triggers build_ext.\n distclass=BinaryDistribution,\n install_requires=requires,\n setup_requires=[\"cython >= 0.29\"],\n extras_require=extras,\n entry_points={\n \"console_scripts\": [\n \"ray=ray.scripts.scripts:main\",\n \"rllib=ray.rllib.scripts:cli [rllib]\", \"tune=ray.tune.scripts:cli\"\n ]\n },\n include_package_data=True,\n zip_safe=False,\n license=\"Apache 2.0\")\n", "path": "python/setup.py"}]} | 2,578 | 115 |
gh_patches_debug_11539 | rasdani/github-patches | git_diff | Pylons__pyramid-2277 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pyobject truncates code at comment
See https://github.com/sphinx-doc/sphinx/issues/2253
Example rendered docs:
http://docs.pylonsproject.org/projects/pyramid/en/latest/quick_tour.html#handling-web-requests-and-responses
rst syntax:
https://github.com/Pylons/pyramid/blame/master/docs/quick_tour.rst#L119-L120
Source code:
https://github.com/Pylons/pyramid/blob/master/docs/quick_tour/requests/app.py#L7
When the bug is fixed and released, we will need to:
- revert the source code sample to use `#` style comments
- bump up the Sphinx version
--- END ISSUE ---
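For context, the sample at `docs/quick_tour/requests/app.py` (shown below) currently uses a docstring as the workaround; once the Sphinx fix lands, the issue asks for it to be reverted to a `#` comment. A hypothetical sketch of that reverted form:

```python
# Hypothetical reverted form of hello_world (a '#' comment instead of the
# docstring workaround); per the linked Sphinx issue, a literalinclude with
# :pyobject: would truncate the function at this comment line.
from pyramid.response import Response


def hello_world(request):
    # Some parameters from a request such as /?name=lisa
    url = request.url
    name = request.params.get('name', 'No Name Provided')
    body = 'URL %s with name: %s' % (url, name)
    return Response(content_type="text/plain", body=body)
```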
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 ##############################################################################
2 #
3 # Copyright (c) 2008-2013 Agendaless Consulting and Contributors.
4 # All Rights Reserved.
5 #
6 # This software is subject to the provisions of the BSD-like license at
7 # http://www.repoze.org/LICENSE.txt. A copy of the license should accompany
8 # this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL
9 # EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,
10 # THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND
11 # FITNESS FOR A PARTICULAR PURPOSE
12 #
13 ##############################################################################
14
15 import os
16 import sys
17
18 from setuptools import setup, find_packages
19
20 py_version = sys.version_info[:2]
21 is_pypy = '__pypy__' in sys.builtin_module_names
22
23 PY3 = py_version[0] == 3
24
25 if PY3:
26 if py_version < (3, 3) and not is_pypy: # PyPy3 masquerades as Python 3.2...
27 raise RuntimeError('On Python 3, Pyramid requires Python 3.3 or better')
28 else:
29 if py_version < (2, 6):
30 raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better')
31
32 here = os.path.abspath(os.path.dirname(__file__))
33 try:
34 with open(os.path.join(here, 'README.rst')) as f:
35 README = f.read()
36 with open(os.path.join(here, 'CHANGES.txt')) as f:
37 CHANGES = f.read()
38 except IOError:
39 README = CHANGES = ''
40
41 install_requires=[
42 'setuptools',
43 'WebOb >= 1.3.1', # request.domain and CookieProfile
44 'repoze.lru >= 0.4', # py3 compat
45 'zope.interface >= 3.8.0', # has zope.interface.registry
46 'zope.deprecation >= 3.5.0', # py3 compat
47 'venusian >= 1.0a3', # ``ignore``
48 'translationstring >= 0.4', # py3 compat
49 'PasteDeploy >= 1.5.0', # py3 compat
50 ]
51
52 tests_require = [
53 'WebTest >= 1.3.1', # py3 compat
54 ]
55
56 if not PY3:
57 tests_require.append('zope.component>=3.11.0')
58
59 docs_extras = [
60 'Sphinx >= 1.3.4',
61 'docutils',
62 'repoze.sphinx.autointerface',
63 'pylons_sphinx_latesturl',
64 'pylons-sphinx-themes',
65 'sphinxcontrib-programoutput',
66 ]
67
68 testing_extras = tests_require + [
69 'nose',
70 'coverage',
71 'virtualenv', # for scaffolding tests
72 ]
73
74 setup(name='pyramid',
75 version='1.7.dev0',
76 description='The Pyramid Web Framework, a Pylons project',
77 long_description=README + '\n\n' + CHANGES,
78 classifiers=[
79 "Development Status :: 6 - Mature",
80 "Intended Audience :: Developers",
81 "Programming Language :: Python",
82 "Programming Language :: Python :: 2.6",
83 "Programming Language :: Python :: 2.7",
84 "Programming Language :: Python :: 3",
85 "Programming Language :: Python :: 3.3",
86 "Programming Language :: Python :: 3.4",
87 "Programming Language :: Python :: 3.5",
88 "Programming Language :: Python :: Implementation :: CPython",
89 "Programming Language :: Python :: Implementation :: PyPy",
90 "Framework :: Pyramid",
91 "Topic :: Internet :: WWW/HTTP",
92 "Topic :: Internet :: WWW/HTTP :: WSGI",
93 "License :: Repoze Public License",
94 ],
95 keywords='web wsgi pylons pyramid',
96 author="Chris McDonough, Agendaless Consulting",
97 author_email="[email protected]",
98 url="http://docs.pylonsproject.org/en/latest/docs/pyramid.html",
99 license="BSD-derived (http://www.repoze.org/LICENSE.txt)",
100 packages=find_packages(),
101 include_package_data=True,
102 zip_safe=False,
103 install_requires = install_requires,
104 extras_require = {
105 'testing':testing_extras,
106 'docs':docs_extras,
107 },
108 tests_require = tests_require,
109 test_suite="pyramid.tests",
110 entry_points = """\
111 [pyramid.scaffold]
112 starter=pyramid.scaffolds:StarterProjectTemplate
113 zodb=pyramid.scaffolds:ZODBProjectTemplate
114 alchemy=pyramid.scaffolds:AlchemyProjectTemplate
115 [pyramid.pshell_runner]
116 python=pyramid.scripts.pshell:python_shell_runner
117 [console_scripts]
118 pcreate = pyramid.scripts.pcreate:main
119 pserve = pyramid.scripts.pserve:main
120 pshell = pyramid.scripts.pshell:main
121 proutes = pyramid.scripts.proutes:main
122 pviews = pyramid.scripts.pviews:main
123 ptweens = pyramid.scripts.ptweens:main
124 prequest = pyramid.scripts.prequest:main
125 pdistreport = pyramid.scripts.pdistreport:main
126 [paste.server_runner]
127 wsgiref = pyramid.scripts.pserve:wsgiref_server_runner
128 cherrypy = pyramid.scripts.pserve:cherrypy_server_runner
129 """
130 )
131
132
```
Path: `docs/quick_tour/requests/app.py`
Content:
```
1 from wsgiref.simple_server import make_server
2 from pyramid.config import Configurator
3 from pyramid.response import Response
4
5
6 def hello_world(request):
7 """ Some parameters from a request such as /?name=lisa """
8 url = request.url
9 name = request.params.get('name', 'No Name Provided')
10
11 body = 'URL %s with name: %s' % (url, name)
12 return Response(
13 content_type="text/plain",
14 body=body
15 )
16
17
18 if __name__ == '__main__':
19 config = Configurator()
20 config.add_route('hello', '/')
21 config.add_view(hello_world, route_name='hello')
22 app = config.make_wsgi_app()
23 server = make_server('0.0.0.0', 6543, app)
24 server.serve_forever()
25
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/quick_tour/requests/app.py b/docs/quick_tour/requests/app.py
--- a/docs/quick_tour/requests/app.py
+++ b/docs/quick_tour/requests/app.py
@@ -4,7 +4,7 @@
def hello_world(request):
- """ Some parameters from a request such as /?name=lisa """
+ # Some parameters from a request such as /?name=lisa
url = request.url
name = request.params.get('name', 'No Name Provided')
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -57,7 +57,7 @@
tests_require.append('zope.component>=3.11.0')
docs_extras = [
- 'Sphinx >= 1.3.4',
+ 'Sphinx >= 1.3.5',
'docutils',
'repoze.sphinx.autointerface',
'pylons_sphinx_latesturl',
| {"golden_diff": "diff --git a/docs/quick_tour/requests/app.py b/docs/quick_tour/requests/app.py\n--- a/docs/quick_tour/requests/app.py\n+++ b/docs/quick_tour/requests/app.py\n@@ -4,7 +4,7 @@\n \n \n def hello_world(request):\n- \"\"\" Some parameters from a request such as /?name=lisa \"\"\"\n+ # Some parameters from a request such as /?name=lisa\n url = request.url\n name = request.params.get('name', 'No Name Provided')\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -57,7 +57,7 @@\n tests_require.append('zope.component>=3.11.0')\n \n docs_extras = [\n- 'Sphinx >= 1.3.4',\n+ 'Sphinx >= 1.3.5',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n", "issue": "pyobject truncates code at comment\nSee https://github.com/sphinx-doc/sphinx/issues/2253\n\nExample rendered docs:\nhttp://docs.pylonsproject.org/projects/pyramid/en/latest/quick_tour.html#handling-web-requests-and-responses\n\nrst syntax:\nhttps://github.com/Pylons/pyramid/blame/master/docs/quick_tour.rst#L119-L120\n\nSource code:\nhttps://github.com/Pylons/pyramid/blob/master/docs/quick_tour/requests/app.py#L7\n\nWhen the bug is fixed and released, we will need to:\n- revert the source code sample to use `#` style comments\n- bump up the Sphinx version\n\n", "before_files": [{"content": "##############################################################################\n#\n# Copyright (c) 2008-2013 Agendaless Consulting and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the BSD-like license at\n# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany\n# this distribution. THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL\n# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND\n# FITNESS FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\npy_version = sys.version_info[:2]\nis_pypy = '__pypy__' in sys.builtin_module_names\n\nPY3 = py_version[0] == 3\n\nif PY3:\n if py_version < (3, 3) and not is_pypy: # PyPy3 masquerades as Python 3.2...\n raise RuntimeError('On Python 3, Pyramid requires Python 3.3 or better')\nelse:\n if py_version < (2, 6):\n raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better')\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n with open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n with open(os.path.join(here, 'CHANGES.txt')) as f:\n CHANGES = f.read()\nexcept IOError:\n README = CHANGES = ''\n\ninstall_requires=[\n 'setuptools',\n 'WebOb >= 1.3.1', # request.domain and CookieProfile\n 'repoze.lru >= 0.4', # py3 compat\n 'zope.interface >= 3.8.0', # has zope.interface.registry\n 'zope.deprecation >= 3.5.0', # py3 compat\n 'venusian >= 1.0a3', # ``ignore``\n 'translationstring >= 0.4', # py3 compat\n 'PasteDeploy >= 1.5.0', # py3 compat\n ]\n\ntests_require = [\n 'WebTest >= 1.3.1', # py3 compat\n ]\n\nif not PY3:\n tests_require.append('zope.component>=3.11.0')\n\ndocs_extras = [\n 'Sphinx >= 1.3.4',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n 'pylons-sphinx-themes',\n 'sphinxcontrib-programoutput',\n ]\n\ntesting_extras = tests_require + [\n 'nose',\n 'coverage',\n 'virtualenv', # for scaffolding tests\n ]\n\nsetup(name='pyramid',\n version='1.7.dev0',\n description='The Pyramid Web Framework, a 
Pylons project',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Development Status :: 6 - Mature\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Framework :: Pyramid\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI\",\n \"License :: Repoze Public License\",\n ],\n keywords='web wsgi pylons pyramid',\n author=\"Chris McDonough, Agendaless Consulting\",\n author_email=\"[email protected]\",\n url=\"http://docs.pylonsproject.org/en/latest/docs/pyramid.html\",\n license=\"BSD-derived (http://www.repoze.org/LICENSE.txt)\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires = install_requires,\n extras_require = {\n 'testing':testing_extras,\n 'docs':docs_extras,\n },\n tests_require = tests_require,\n test_suite=\"pyramid.tests\",\n entry_points = \"\"\"\\\n [pyramid.scaffold]\n starter=pyramid.scaffolds:StarterProjectTemplate\n zodb=pyramid.scaffolds:ZODBProjectTemplate\n alchemy=pyramid.scaffolds:AlchemyProjectTemplate\n [pyramid.pshell_runner]\n python=pyramid.scripts.pshell:python_shell_runner\n [console_scripts]\n pcreate = pyramid.scripts.pcreate:main\n pserve = pyramid.scripts.pserve:main\n pshell = pyramid.scripts.pshell:main\n proutes = pyramid.scripts.proutes:main\n pviews = pyramid.scripts.pviews:main\n ptweens = pyramid.scripts.ptweens:main\n prequest = pyramid.scripts.prequest:main\n pdistreport = pyramid.scripts.pdistreport:main\n [paste.server_runner]\n wsgiref = pyramid.scripts.pserve:wsgiref_server_runner\n cherrypy = pyramid.scripts.pserve:cherrypy_server_runner\n \"\"\"\n )\n\n", "path": "setup.py"}, {"content": "from wsgiref.simple_server import make_server\nfrom pyramid.config import Configurator\nfrom pyramid.response import Response\n\n\ndef hello_world(request):\n \"\"\" Some parameters from a request such as /?name=lisa \"\"\"\n url = request.url\n name = request.params.get('name', 'No Name Provided')\n\n body = 'URL %s with name: %s' % (url, name)\n return Response(\n content_type=\"text/plain\",\n body=body\n )\n\n\nif __name__ == '__main__':\n config = Configurator()\n config.add_route('hello', '/')\n config.add_view(hello_world, route_name='hello')\n app = config.make_wsgi_app()\n server = make_server('0.0.0.0', 6543, app)\n server.serve_forever()\n", "path": "docs/quick_tour/requests/app.py"}], "after_files": [{"content": "##############################################################################\n#\n# Copyright (c) 2008-2013 Agendaless Consulting and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the BSD-like license at\n# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany\n# this distribution. 
THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL\n# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND\n# FITNESS FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\npy_version = sys.version_info[:2]\nis_pypy = '__pypy__' in sys.builtin_module_names\n\nPY3 = py_version[0] == 3\n\nif PY3:\n if py_version < (3, 3) and not is_pypy: # PyPy3 masquerades as Python 3.2...\n raise RuntimeError('On Python 3, Pyramid requires Python 3.3 or better')\nelse:\n if py_version < (2, 6):\n raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better')\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n with open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n with open(os.path.join(here, 'CHANGES.txt')) as f:\n CHANGES = f.read()\nexcept IOError:\n README = CHANGES = ''\n\ninstall_requires=[\n 'setuptools',\n 'WebOb >= 1.3.1', # request.domain and CookieProfile\n 'repoze.lru >= 0.4', # py3 compat\n 'zope.interface >= 3.8.0', # has zope.interface.registry\n 'zope.deprecation >= 3.5.0', # py3 compat\n 'venusian >= 1.0a3', # ``ignore``\n 'translationstring >= 0.4', # py3 compat\n 'PasteDeploy >= 1.5.0', # py3 compat\n ]\n\ntests_require = [\n 'WebTest >= 1.3.1', # py3 compat\n ]\n\nif not PY3:\n tests_require.append('zope.component>=3.11.0')\n\ndocs_extras = [\n 'Sphinx >= 1.3.5',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n 'pylons-sphinx-themes',\n 'sphinxcontrib-programoutput',\n ]\n\ntesting_extras = tests_require + [\n 'nose',\n 'coverage',\n 'virtualenv', # for scaffolding tests\n ]\n\nsetup(name='pyramid',\n version='1.7.dev0',\n description='The Pyramid Web Framework, a Pylons project',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Development Status :: 6 - Mature\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Framework :: Pyramid\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI\",\n \"License :: Repoze Public License\",\n ],\n keywords='web wsgi pylons pyramid',\n author=\"Chris McDonough, Agendaless Consulting\",\n author_email=\"[email protected]\",\n url=\"http://docs.pylonsproject.org/en/latest/docs/pyramid.html\",\n license=\"BSD-derived (http://www.repoze.org/LICENSE.txt)\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires = install_requires,\n extras_require = {\n 'testing':testing_extras,\n 'docs':docs_extras,\n },\n tests_require = tests_require,\n test_suite=\"pyramid.tests\",\n entry_points = \"\"\"\\\n [pyramid.scaffold]\n starter=pyramid.scaffolds:StarterProjectTemplate\n zodb=pyramid.scaffolds:ZODBProjectTemplate\n alchemy=pyramid.scaffolds:AlchemyProjectTemplate\n [pyramid.pshell_runner]\n python=pyramid.scripts.pshell:python_shell_runner\n [console_scripts]\n pcreate = pyramid.scripts.pcreate:main\n pserve = pyramid.scripts.pserve:main\n pshell = 
pyramid.scripts.pshell:main\n proutes = pyramid.scripts.proutes:main\n pviews = pyramid.scripts.pviews:main\n ptweens = pyramid.scripts.ptweens:main\n prequest = pyramid.scripts.prequest:main\n pdistreport = pyramid.scripts.pdistreport:main\n [paste.server_runner]\n wsgiref = pyramid.scripts.pserve:wsgiref_server_runner\n cherrypy = pyramid.scripts.pserve:cherrypy_server_runner\n \"\"\"\n )\n\n", "path": "setup.py"}, {"content": "from wsgiref.simple_server import make_server\nfrom pyramid.config import Configurator\nfrom pyramid.response import Response\n\n\ndef hello_world(request):\n # Some parameters from a request such as /?name=lisa\n url = request.url\n name = request.params.get('name', 'No Name Provided')\n\n body = 'URL %s with name: %s' % (url, name)\n return Response(\n content_type=\"text/plain\",\n body=body\n )\n\n\nif __name__ == '__main__':\n config = Configurator()\n config.add_route('hello', '/')\n config.add_view(hello_world, route_name='hello')\n app = config.make_wsgi_app()\n server = make_server('0.0.0.0', 6543, app)\n server.serve_forever()\n", "path": "docs/quick_tour/requests/app.py"}]} | 2,113 | 223 |
gh_patches_debug_33519 | rasdani/github-patches | git_diff | TheAlgorithms__Python-10121 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve our test coverage
### Feature description
Many of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.
### How to find low-coverage files
Go to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under "Run Tests" and scroll down until you find the section on code coverage:
```
---------- coverage: platform linux, python 3.12.0-final-0 -----------
Name Stmts Miss Cover Missing
-----------------------------------------------------------------------------------------------------------
quantum/q_fourier_transform.py 30 30 0% 14-93
scripts/validate_solutions.py 54 54 0% 2-94
strings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129
...
```
The "Cover" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.
Some files will naturally be hard to write tests for. For example, the file may be poorly written because it lacks any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.
_**When you open your PR, put "Contributes to #9943" in the PR description.**_ Do not use the word "fixes", "resolves", or "closes". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.
### How to add doctests
A doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring:
```py
def add(a: int, b: int) -> int:
"""
Adds two non-negative numbers.
>>> add(1, 1)
2
>>> add(2, 5)
7
>>> add(1, 0)
1
>>> add(-1, -1)
    Traceback (most recent call last):
...
ValueError: Numbers must be non-negative
"""
```
For every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).
Do not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. **Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.
_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `maths/binary_exponentiation_2.py`
Content:
```
1 """
2 * Binary Exponentiation for Powers
3 * This is a method to find a^b in a time complexity of O(log b)
4 * This is one of the most commonly used methods of finding powers.
5 * Also useful in cases where solution to (a^b)%c is required,
6 * where a,b,c can be numbers over the computers calculation limits.
7 * Done using iteration, can also be done using recursion
8
9 * @author chinmoy159
10 * @version 1.0 dated 10/08/2017
11 """
12
13
14 def b_expo(a: int, b: int) -> int:
15 res = 1
16 while b > 0:
17 if b & 1:
18 res *= a
19
20 a *= a
21 b >>= 1
22
23 return res
24
25
26 def b_expo_mod(a: int, b: int, c: int) -> int:
27 res = 1
28 while b > 0:
29 if b & 1:
30 res = ((res % c) * (a % c)) % c
31
32 a *= a
33 b >>= 1
34
35 return res
36
37
38 """
39 * Wondering how this method works !
40 * It's pretty simple.
41 * Let's say you need to calculate a ^ b
42 * RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2
43 * RULE 2 : IF b is ODD, then ---- a ^ b = a * (a ^ (b - 1)) :: where (b - 1) is even.
44 * Once b is even, repeat the process to get a ^ b
45 * Repeat the process till b = 1 OR b = 0, because a^1 = a AND a^0 = 1
46 *
47 * As far as the modulo is concerned,
48 * the fact : (a*b) % c = ((a%c) * (b%c)) % c
49 * Now apply RULE 1 OR 2 whichever is required.
50 """
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/maths/binary_exponentiation_2.py b/maths/binary_exponentiation_2.py
--- a/maths/binary_exponentiation_2.py
+++ b/maths/binary_exponentiation_2.py
@@ -1,17 +1,33 @@
"""
-* Binary Exponentiation for Powers
-* This is a method to find a^b in a time complexity of O(log b)
-* This is one of the most commonly used methods of finding powers.
-* Also useful in cases where solution to (a^b)%c is required,
-* where a,b,c can be numbers over the computers calculation limits.
-* Done using iteration, can also be done using recursion
-
-* @author chinmoy159
-* @version 1.0 dated 10/08/2017
+Binary Exponentiation
+This is a method to find a^b in O(log b) time complexity
+This is one of the most commonly used methods of exponentiation
+It's also useful when the solution to (a^b) % c is required because a, b, c may be
+over the computer's calculation limits
+
+Let's say you need to calculate a ^ b
+- RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2
+- RULE 2 : IF b is odd, then a ^ b = a * (a ^ (b - 1)), where b - 1 is even
+Once b is even, repeat the process until b = 1 or b = 0, because a^1 = a and a^0 = 1
+
+For modular exponentiation, we use the fact that (a*b) % c = ((a%c) * (b%c)) % c
+Now apply RULE 1 or 2 as required
+
+@author chinmoy159
"""
def b_expo(a: int, b: int) -> int:
+ """
+ >>> b_expo(2, 10)
+ 1024
+ >>> b_expo(9, 0)
+ 1
+ >>> b_expo(0, 12)
+ 0
+ >>> b_expo(4, 12)
+ 16777216
+ """
res = 1
while b > 0:
if b & 1:
@@ -24,6 +40,16 @@
def b_expo_mod(a: int, b: int, c: int) -> int:
+ """
+ >>> b_expo_mod(2, 10, 1000000007)
+ 1024
+ >>> b_expo_mod(11, 13, 19)
+ 11
+ >>> b_expo_mod(0, 19, 20)
+ 0
+ >>> b_expo_mod(15, 5, 4)
+ 3
+ """
res = 1
while b > 0:
if b & 1:
@@ -33,18 +59,3 @@
b >>= 1
return res
-
-
-"""
-* Wondering how this method works !
-* It's pretty simple.
-* Let's say you need to calculate a ^ b
-* RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2
-* RULE 2 : IF b is ODD, then ---- a ^ b = a * (a ^ (b - 1)) :: where (b - 1) is even.
-* Once b is even, repeat the process to get a ^ b
-* Repeat the process till b = 1 OR b = 0, because a^1 = a AND a^0 = 1
-*
-* As far as the modulo is concerned,
-* the fact : (a*b) % c = ((a%c) * (b%c)) % c
-* Now apply RULE 1 OR 2 whichever is required.
-"""
| {"golden_diff": "diff --git a/maths/binary_exponentiation_2.py b/maths/binary_exponentiation_2.py\n--- a/maths/binary_exponentiation_2.py\n+++ b/maths/binary_exponentiation_2.py\n@@ -1,17 +1,33 @@\n \"\"\"\n-* Binary Exponentiation for Powers\n-* This is a method to find a^b in a time complexity of O(log b)\n-* This is one of the most commonly used methods of finding powers.\n-* Also useful in cases where solution to (a^b)%c is required,\n-* where a,b,c can be numbers over the computers calculation limits.\n-* Done using iteration, can also be done using recursion\n-\n-* @author chinmoy159\n-* @version 1.0 dated 10/08/2017\n+Binary Exponentiation\n+This is a method to find a^b in O(log b) time complexity\n+This is one of the most commonly used methods of exponentiation\n+It's also useful when the solution to (a^b) % c is required because a, b, c may be\n+over the computer's calculation limits\n+\n+Let's say you need to calculate a ^ b\n+- RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2\n+- RULE 2 : IF b is odd, then a ^ b = a * (a ^ (b - 1)), where b - 1 is even\n+Once b is even, repeat the process until b = 1 or b = 0, because a^1 = a and a^0 = 1\n+\n+For modular exponentiation, we use the fact that (a*b) % c = ((a%c) * (b%c)) % c\n+Now apply RULE 1 or 2 as required\n+\n+@author chinmoy159\n \"\"\"\n \n \n def b_expo(a: int, b: int) -> int:\n+ \"\"\"\n+ >>> b_expo(2, 10)\n+ 1024\n+ >>> b_expo(9, 0)\n+ 1\n+ >>> b_expo(0, 12)\n+ 0\n+ >>> b_expo(4, 12)\n+ 16777216\n+ \"\"\"\n res = 1\n while b > 0:\n if b & 1:\n@@ -24,6 +40,16 @@\n \n \n def b_expo_mod(a: int, b: int, c: int) -> int:\n+ \"\"\"\n+ >>> b_expo_mod(2, 10, 1000000007)\n+ 1024\n+ >>> b_expo_mod(11, 13, 19)\n+ 11\n+ >>> b_expo_mod(0, 19, 20)\n+ 0\n+ >>> b_expo_mod(15, 5, 4)\n+ 3\n+ \"\"\"\n res = 1\n while b > 0:\n if b & 1:\n@@ -33,18 +59,3 @@\n b >>= 1\n \n return res\n-\n-\n-\"\"\"\n-* Wondering how this method works !\n-* It's pretty simple.\n-* Let's say you need to calculate a ^ b\n-* RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2\n-* RULE 2 : IF b is ODD, then ---- a ^ b = a * (a ^ (b - 1)) :: where (b - 1) is even.\n-* Once b is even, repeat the process to get a ^ b\n-* Repeat the process till b = 1 OR b = 0, because a^1 = a AND a^0 = 1\n-*\n-* As far as the modulo is concerned,\n-* the fact : (a*b) % c = ((a%c) * (b%c)) % c\n-* Now apply RULE 1 OR 2 whichever is required.\n-\"\"\"\n", "issue": "Improve our test coverage\n### Feature description\r\n\r\nMany of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.\r\n\r\n### How to find low-coverage files\r\n\r\nGo to the Actions tab in this repository and find the most recent **build** workflow run. 
Open the logs under \"Run Tests\" and scroll down until you find the section on code coverage:\r\n```\r\n---------- coverage: platform linux, python 3.12.0-final-0 -----------\r\nName Stmts Miss Cover Missing\r\n-----------------------------------------------------------------------------------------------------------\r\nquantum/q_fourier_transform.py 30 30 0% 14-93\r\nscripts/validate_solutions.py 54 54 0% 2-94\r\nstrings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129\r\n...\r\n```\r\nThe \"Cover\" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.\r\n\r\nSome files will naturally be hard to write tests for. For example, the file may be poorly written because they lack any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.\r\n\r\n_**When you open your PR, put \"Contributes to #9943\" in the PR description.**_ Do not use the word \"fixes\", \"resolves\", or \"closes\". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.\r\n\r\n### How to add doctests\r\n\r\nA doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring:\r\n```py\r\ndef add(a: int, b: int) -> int:\r\n \"\"\"\r\n Adds two non-negative numbers.\r\n >>> add(1, 1)\r\n 2\r\n >>> add(2, 5)\r\n 7\r\n >>> add(1, 0)\r\n 1\r\n >>> add(-1, -1)\r\n Traceback (most recent last):\r\n ...\r\n ValueError: Numbers must be non-negative\r\n \"\"\"\r\n```\r\nFor every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).\r\n\r\nDo not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. 
**Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.\r\n\r\n_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_\n", "before_files": [{"content": "\"\"\"\n* Binary Exponentiation for Powers\n* This is a method to find a^b in a time complexity of O(log b)\n* This is one of the most commonly used methods of finding powers.\n* Also useful in cases where solution to (a^b)%c is required,\n* where a,b,c can be numbers over the computers calculation limits.\n* Done using iteration, can also be done using recursion\n\n* @author chinmoy159\n* @version 1.0 dated 10/08/2017\n\"\"\"\n\n\ndef b_expo(a: int, b: int) -> int:\n res = 1\n while b > 0:\n if b & 1:\n res *= a\n\n a *= a\n b >>= 1\n\n return res\n\n\ndef b_expo_mod(a: int, b: int, c: int) -> int:\n res = 1\n while b > 0:\n if b & 1:\n res = ((res % c) * (a % c)) % c\n\n a *= a\n b >>= 1\n\n return res\n\n\n\"\"\"\n* Wondering how this method works !\n* It's pretty simple.\n* Let's say you need to calculate a ^ b\n* RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2\n* RULE 2 : IF b is ODD, then ---- a ^ b = a * (a ^ (b - 1)) :: where (b - 1) is even.\n* Once b is even, repeat the process to get a ^ b\n* Repeat the process till b = 1 OR b = 0, because a^1 = a AND a^0 = 1\n*\n* As far as the modulo is concerned,\n* the fact : (a*b) % c = ((a%c) * (b%c)) % c\n* Now apply RULE 1 OR 2 whichever is required.\n\"\"\"\n", "path": "maths/binary_exponentiation_2.py"}], "after_files": [{"content": "\"\"\"\nBinary Exponentiation\nThis is a method to find a^b in O(log b) time complexity\nThis is one of the most commonly used methods of exponentiation\nIt's also useful when the solution to (a^b) % c is required because a, b, c may be\nover the computer's calculation limits\n\nLet's say you need to calculate a ^ b\n- RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2\n- RULE 2 : IF b is odd, then a ^ b = a * (a ^ (b - 1)), where b - 1 is even\nOnce b is even, repeat the process until b = 1 or b = 0, because a^1 = a and a^0 = 1\n\nFor modular exponentiation, we use the fact that (a*b) % c = ((a%c) * (b%c)) % c\nNow apply RULE 1 or 2 as required\n\n@author chinmoy159\n\"\"\"\n\n\ndef b_expo(a: int, b: int) -> int:\n \"\"\"\n >>> b_expo(2, 10)\n 1024\n >>> b_expo(9, 0)\n 1\n >>> b_expo(0, 12)\n 0\n >>> b_expo(4, 12)\n 16777216\n \"\"\"\n res = 1\n while b > 0:\n if b & 1:\n res *= a\n\n a *= a\n b >>= 1\n\n return res\n\n\ndef b_expo_mod(a: int, b: int, c: int) -> int:\n \"\"\"\n >>> b_expo_mod(2, 10, 1000000007)\n 1024\n >>> b_expo_mod(11, 13, 19)\n 11\n >>> b_expo_mod(0, 19, 20)\n 0\n >>> b_expo_mod(15, 5, 4)\n 3\n \"\"\"\n res = 1\n while b > 0:\n if b & 1:\n res = ((res % c) * (a % c)) % c\n\n a *= a\n b >>= 1\n\n return res\n", "path": "maths/binary_exponentiation_2.py"}]} | 1,656 | 955 |
gh_patches_debug_21356 | rasdani/github-patches | git_diff | pymodbus-dev__pymodbus-1459 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pymodbus.simulator ignores command line arguments "http_port" and "json_file"
### Versions
- Python: 3.10.10
- OS: Fedora 36
- Pymodbus: 3.2.2
### Pymodbus Specific
- Server: tcp
- Client: tcp
### Description
- when starting pymodbus.simulator with a specific json file, an error stating a different filename is shown
```
$ pymodbus.simulator --json_file /tmp/setup.json
2023-03-25 20:06:03,532 INFO logging:96 Start simulator
Traceback (most recent call last):
File "/usr/bin/pymodbus.simulator", line 8, in <module>
sys.exit(main())
File "/usr/lib/python3.10/site-packages/pymodbus/server/simulator/main.py", line 119, in main
asyncio.run(run_main(), debug=True)
File "/usr/lib64/python3.10/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/usr/lib64/python3.10/asyncio/base_events.py", line 649, in run_until_complete
return future.result()
File "/usr/lib/python3.10/site-packages/pymodbus/server/simulator/main.py", line 113, in run_main
task = ModbusSimulatorServer(**cmd_args)
File "/usr/lib/python3.10/site-packages/pymodbus/server/simulator/http_server.py", line 130, in __init__
with open(json_file, encoding="utf-8") as file:
FileNotFoundError: [Errno 2] No such file or directory: './pymodbus/server/simulator/setup.json'
```
- the problem is in https://github.com/pymodbus-dev/pymodbus/blob/c2db53cad3c60834bffecd23db699f6982f914fb/pymodbus/server/simulator/main.py#L113 where the command line arguments are overwritten
- if the intention is to have default values, a patch keeping exactly these defaults would look like this:
```
diff --git a/pymodbus/server/simulator/main.py b/pymodbus/server/simulator/main.py
index 09af492..443e1a6 100755
--- a/pymodbus/server/simulator/main.py
+++ b/pymodbus/server/simulator/main.py
@@ -70,6 +70,7 @@ def get_commandline():
parser.add_argument(
"--http_port",
help="use <http_port> as port to bind http listen",
+ default=8081,
type=str,
)
parser.add_argument(
@@ -82,6 +83,7 @@ def get_commandline():
parser.add_argument(
"--json_file",
help='name of json file, default is "setup.json"',
+ default="./pymodbus/server/simulator/setup.json",
type=str,
)
parser.add_argument(
@@ -109,8 +111,6 @@ def get_commandline():
async def run_main():
"""Run server async."""
cmd_args = get_commandline()
- cmd_args["http_port"] = 8081
- cmd_args["json_file"] = "./pymodbus/server/simulator/setup.json"
task = ModbusSimulatorServer(**cmd_args)
await task.run_forever()
```
- however, for running pymodbus.simulator after installing without any arguments, I would suggest the following:
- install the default setup.json e.g. into the same directory as main.py of the simulator
- use a diff like this:
```
diff --git a/pymodbus/server/simulator/main.py b/pymodbus/server/simulator/main.py
index 09af492..086d724 100755
--- a/pymodbus/server/simulator/main.py
+++ b/pymodbus/server/simulator/main.py
@@ -41,6 +41,7 @@ options:
"""
import argparse
import asyncio
+import os
from pymodbus import pymodbus_apply_logging_config
from pymodbus.logging import Log
@@ -70,6 +71,7 @@ def get_commandline():
parser.add_argument(
"--http_port",
help="use <http_port> as port to bind http listen",
+ default=8081,
type=str,
)
parser.add_argument(
@@ -82,6 +84,7 @@ def get_commandline():
parser.add_argument(
"--json_file",
help='name of json file, default is "setup.json"',
+ default=os.path.join(os.path.dirname(__file__), "setup.json"),
type=str,
)
parser.add_argument(
@@ -109,8 +112,6 @@ def get_commandline():
async def run_main():
"""Run server async."""
cmd_args = get_commandline()
- cmd_args["http_port"] = 8081
- cmd_args["json_file"] = "./pymodbus/server/simulator/setup.json"
task = ModbusSimulatorServer(**cmd_args)
await task.run_forever()
```
- please provide some guidance on which way to go, then I'll create a corresponding pull request
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pymodbus/server/simulator/main.py`
Content:
```
1 #!/usr/bin/env python3
2 """HTTP server for modbus simulator.
3
4 The modbus simulator contain 3 distint parts:
5
6 - Datastore simulator, to define registers and their behaviour including actions: (simulator)(../../datastore/simulator.py)
7 - Modbus server: (server)(./http_server.py)
8 - HTTP server with REST API and web pages providing an online console in your browser
9
10 Multiple setups for different server types and/or devices are prepared in a (json file)(./setup.json), the detailed configuration is explained in (doc)(README.md)
11
12 The command line parameters are kept to a minimum:
13
14 usage: main.py [-h] [--modbus_server MODBUS_SERVER]
15 [--modbus_device MODBUS_DEVICE] [--http_host HTTP_HOST]
16 [--http_port HTTP_PORT]
17 [--log {critical,error,warning,info,debug}]
18 [--json_file JSON_FILE]
19 [--custom_actions_module CUSTOM_ACTIONS_MODULE]
20
21 Modbus server with REST-API and web server
22
23 options:
24 -h, --help show this help message and exit
25 --modbus_server MODBUS_SERVER
26 use <modbus_server> from server_list in json file
27 --modbus_device MODBUS_DEVICE
28 use <modbus_device> from device_list in json file
29 --http_host HTTP_HOST
30 use <http_host> as host to bind http listen
31 --http_port HTTP_PORT
32 use <http_port> as port to bind http listen
33 --log {critical,error,warning,info,debug}
34 set log level, default is info
35 --log_file LOG_FILE
36 name of server log file, default is "server.log"
37 --json_file JSON_FILE
38 name of json_file, default is "setup.json"
39 --custom_actions_module CUSTOM_ACTIONS_MODULE
40 python file with custom actions, default is none
41 """
42 import argparse
43 import asyncio
44
45 from pymodbus import pymodbus_apply_logging_config
46 from pymodbus.logging import Log
47 from pymodbus.server.simulator.http_server import ModbusSimulatorServer
48
49
50 def get_commandline():
51 """Get command line arguments."""
52 parser = argparse.ArgumentParser(
53 description="Modbus server with REST-API and web server"
54 )
55 parser.add_argument(
56 "--modbus_server",
57 help="use <modbus_server> from server_list in json file",
58 type=str,
59 )
60 parser.add_argument(
61 "--modbus_device",
62 help="use <modbus_device> from device_list in json file",
63 type=str,
64 )
65 parser.add_argument(
66 "--http_host",
67 help="use <http_host> as host to bind http listen",
68 type=str,
69 )
70 parser.add_argument(
71 "--http_port",
72 help="use <http_port> as port to bind http listen",
73 type=str,
74 )
75 parser.add_argument(
76 "--log",
77 choices=["critical", "error", "warning", "info", "debug"],
78 help="set log level, default is info",
79 default="info",
80 type=str,
81 )
82 parser.add_argument(
83 "--json_file",
84 help='name of json file, default is "setup.json"',
85 type=str,
86 )
87 parser.add_argument(
88 "--log_file",
89 help='name of server log file, default is "server.log"',
90 type=str,
91 )
92 parser.add_argument(
93 "--custom_actions_module",
94 help="python file with custom actions, default is none",
95 type=str,
96 )
97 args = parser.parse_args()
98 pymodbus_apply_logging_config(args.log.upper())
99 Log.info("Start simulator")
100 cmd_args = {}
101 for argument in args.__dict__:
102 if argument == "log":
103 continue
104 if args.__dict__[argument] is not None:
105 cmd_args[argument] = args.__dict__[argument]
106 return cmd_args
107
108
109 async def run_main():
110 """Run server async."""
111 cmd_args = get_commandline()
112 cmd_args["http_port"] = 8081
113 cmd_args["json_file"] = "./pymodbus/server/simulator/setup.json"
114 task = ModbusSimulatorServer(**cmd_args)
115 await task.run_forever()
116
117
118 def main():
119 """Run server."""
120 asyncio.run(run_main(), debug=True)
121
122
123 if __name__ == "__main__":
124 main()
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pymodbus/server/simulator/main.py b/pymodbus/server/simulator/main.py
--- a/pymodbus/server/simulator/main.py
+++ b/pymodbus/server/simulator/main.py
@@ -66,6 +66,7 @@
"--http_host",
help="use <http_host> as host to bind http listen",
type=str,
+ default=8081,
)
parser.add_argument(
"--http_port",
@@ -83,6 +84,7 @@
"--json_file",
help='name of json file, default is "setup.json"',
type=str,
+ default="./pymodbus/server/simulator/setup.json",
)
parser.add_argument(
"--log_file",
@@ -109,8 +111,6 @@
async def run_main():
"""Run server async."""
cmd_args = get_commandline()
- cmd_args["http_port"] = 8081
- cmd_args["json_file"] = "./pymodbus/server/simulator/setup.json"
task = ModbusSimulatorServer(**cmd_args)
await task.run_forever()
| {"golden_diff": "diff --git a/pymodbus/server/simulator/main.py b/pymodbus/server/simulator/main.py\n--- a/pymodbus/server/simulator/main.py\n+++ b/pymodbus/server/simulator/main.py\n@@ -66,6 +66,7 @@\n \"--http_host\",\n help=\"use <http_host> as host to bind http listen\",\n type=str,\n+ default=8081,\n )\n parser.add_argument(\n \"--http_port\",\n@@ -83,6 +84,7 @@\n \"--json_file\",\n help='name of json file, default is \"setup.json\"',\n type=str,\n+ default=\"./pymodbus/server/simulator/setup.json\",\n )\n parser.add_argument(\n \"--log_file\",\n@@ -109,8 +111,6 @@\n async def run_main():\n \"\"\"Run server async.\"\"\"\n cmd_args = get_commandline()\n- cmd_args[\"http_port\"] = 8081\n- cmd_args[\"json_file\"] = \"./pymodbus/server/simulator/setup.json\"\n task = ModbusSimulatorServer(**cmd_args)\n await task.run_forever()\n", "issue": "pymodbus.simulator ignores command line arguments \"http_port\" and \"json_file\"\n### Versions\r\n\r\n- Python: 3.10.10\r\n- OS: Fedora 36\r\n- Pymodbus: 3.2.2\r\n\r\n### Pymodbus Specific\r\n\r\n- Server: tcp\r\n- Client: tcp\r\n\r\n### Description\r\n\r\n- when starting pymodbus.simulator with a specific json file, an error stating a different filename is shown\r\n```\r\n$ pymodbus.simulator --json_file /tmp/setup.json \r\n2023-03-25 20:06:03,532 INFO logging:96 Start simulator\r\nTraceback (most recent call last):\r\n File \"/usr/bin/pymodbus.simulator\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/usr/lib/python3.10/site-packages/pymodbus/server/simulator/main.py\", line 119, in main\r\n asyncio.run(run_main(), debug=True)\r\n File \"/usr/lib64/python3.10/asyncio/runners.py\", line 44, in run\r\n return loop.run_until_complete(main)\r\n File \"/usr/lib64/python3.10/asyncio/base_events.py\", line 649, in run_until_complete\r\n return future.result()\r\n File \"/usr/lib/python3.10/site-packages/pymodbus/server/simulator/main.py\", line 113, in run_main\r\n task = ModbusSimulatorServer(**cmd_args)\r\n File \"/usr/lib/python3.10/site-packages/pymodbus/server/simulator/http_server.py\", line 130, in __init__\r\n with open(json_file, encoding=\"utf-8\") as file:\r\nFileNotFoundError: [Errno 2] No such file or directory: './pymodbus/server/simulator/setup.json'\r\n```\r\n- the problem is in https://github.com/pymodbus-dev/pymodbus/blob/c2db53cad3c60834bffecd23db699f6982f914fb/pymodbus/server/simulator/main.py#L113 where the command line arguments are overwritten\r\n- if it is the intention having default values, a patch keeping exactly these defaults would look like this:\r\n```\r\ndiff --git a/pymodbus/server/simulator/main.py b/pymodbus/server/simulator/main.py\r\nindex 09af492..443e1a6 100755\r\n--- a/pymodbus/server/simulator/main.py\r\n+++ b/pymodbus/server/simulator/main.py\r\n@@ -70,6 +70,7 @@ def get_commandline():\r\n parser.add_argument(\r\n \"--http_port\",\r\n help=\"use <http_port> as port to bind http listen\",\r\n+ default=8081,\r\n type=str,\r\n )\r\n parser.add_argument(\r\n@@ -82,6 +83,7 @@ def get_commandline():\r\n parser.add_argument(\r\n \"--json_file\",\r\n help='name of json file, default is \"setup.json\"',\r\n+ default=\"./pymodbus/server/simulator/setup.json\",\r\n type=str,\r\n )\r\n parser.add_argument(\r\n@@ -109,8 +111,6 @@ def get_commandline():\r\n async def run_main():\r\n \"\"\"Run server async.\"\"\"\r\n cmd_args = get_commandline()\r\n- cmd_args[\"http_port\"] = 8081\r\n- cmd_args[\"json_file\"] = \"./pymodbus/server/simulator/setup.json\"\r\n task = ModbusSimulatorServer(**cmd_args)\r\n await 
task.run_forever()\r\n ```\r\n- however, for running pymodbus.simulator after installing without any arguments, I would suggest the following:\r\n - install the default setup.json e.g. into the same directory as main.py of the simulator\r\n - use a diff like this:\r\n```\r\ndiff --git a/pymodbus/server/simulator/main.py b/pymodbus/server/simulator/main.py\r\nindex 09af492..086d724 100755\r\n--- a/pymodbus/server/simulator/main.py\r\n+++ b/pymodbus/server/simulator/main.py\r\n@@ -41,6 +41,7 @@ options:\r\n \"\"\"\r\n import argparse\r\n import asyncio\r\n+import os\r\n \r\n from pymodbus import pymodbus_apply_logging_config\r\n from pymodbus.logging import Log\r\n@@ -70,6 +71,7 @@ def get_commandline():\r\n parser.add_argument(\r\n \"--http_port\",\r\n help=\"use <http_port> as port to bind http listen\",\r\n+ default=8081,\r\n type=str,\r\n )\r\n parser.add_argument(\r\n@@ -82,6 +84,7 @@ def get_commandline():\r\n parser.add_argument(\r\n \"--json_file\",\r\n help='name of json file, default is \"setup.json\"',\r\n+ default=os.path.join(os.path.dirname(__file__), \"setup.json\"),\r\n type=str,\r\n )\r\n parser.add_argument(\r\n@@ -109,8 +112,6 @@ def get_commandline():\r\n async def run_main():\r\n \"\"\"Run server async.\"\"\"\r\n cmd_args = get_commandline()\r\n- cmd_args[\"http_port\"] = 8081\r\n- cmd_args[\"json_file\"] = \"./pymodbus/server/simulator/setup.json\"\r\n task = ModbusSimulatorServer(**cmd_args)\r\n await task.run_forever()\r\n \r\n```\r\n- please provide some guidance which way to go, then I'll create an according pull request\n", "before_files": [{"content": "#!/usr/bin/env python3\n\"\"\"HTTP server for modbus simulator.\n\nThe modbus simulator contain 3 distint parts:\n\n- Datastore simulator, to define registers and their behaviour including actions: (simulator)(../../datastore/simulator.py)\n- Modbus server: (server)(./http_server.py)\n- HTTP server with REST API and web pages providing an online console in your browser\n\nMultiple setups for different server types and/or devices are prepared in a (json file)(./setup.json), the detailed configuration is explained in (doc)(README.md)\n\nThe command line parameters are kept to a minimum:\n\nusage: main.py [-h] [--modbus_server MODBUS_SERVER]\n [--modbus_device MODBUS_DEVICE] [--http_host HTTP_HOST]\n [--http_port HTTP_PORT]\n [--log {critical,error,warning,info,debug}]\n [--json_file JSON_FILE]\n [--custom_actions_module CUSTOM_ACTIONS_MODULE]\n\nModbus server with REST-API and web server\n\noptions:\n -h, --help show this help message and exit\n --modbus_server MODBUS_SERVER\n use <modbus_server> from server_list in json file\n --modbus_device MODBUS_DEVICE\n use <modbus_device> from device_list in json file\n --http_host HTTP_HOST\n use <http_host> as host to bind http listen\n --http_port HTTP_PORT\n use <http_port> as port to bind http listen\n --log {critical,error,warning,info,debug}\n set log level, default is info\n --log_file LOG_FILE\n name of server log file, default is \"server.log\"\n --json_file JSON_FILE\n name of json_file, default is \"setup.json\"\n --custom_actions_module CUSTOM_ACTIONS_MODULE\n python file with custom actions, default is none\n\"\"\"\nimport argparse\nimport asyncio\n\nfrom pymodbus import pymodbus_apply_logging_config\nfrom pymodbus.logging import Log\nfrom pymodbus.server.simulator.http_server import ModbusSimulatorServer\n\n\ndef get_commandline():\n \"\"\"Get command line arguments.\"\"\"\n parser = argparse.ArgumentParser(\n description=\"Modbus server with REST-API and web 
server\"\n )\n parser.add_argument(\n \"--modbus_server\",\n help=\"use <modbus_server> from server_list in json file\",\n type=str,\n )\n parser.add_argument(\n \"--modbus_device\",\n help=\"use <modbus_device> from device_list in json file\",\n type=str,\n )\n parser.add_argument(\n \"--http_host\",\n help=\"use <http_host> as host to bind http listen\",\n type=str,\n )\n parser.add_argument(\n \"--http_port\",\n help=\"use <http_port> as port to bind http listen\",\n type=str,\n )\n parser.add_argument(\n \"--log\",\n choices=[\"critical\", \"error\", \"warning\", \"info\", \"debug\"],\n help=\"set log level, default is info\",\n default=\"info\",\n type=str,\n )\n parser.add_argument(\n \"--json_file\",\n help='name of json file, default is \"setup.json\"',\n type=str,\n )\n parser.add_argument(\n \"--log_file\",\n help='name of server log file, default is \"server.log\"',\n type=str,\n )\n parser.add_argument(\n \"--custom_actions_module\",\n help=\"python file with custom actions, default is none\",\n type=str,\n )\n args = parser.parse_args()\n pymodbus_apply_logging_config(args.log.upper())\n Log.info(\"Start simulator\")\n cmd_args = {}\n for argument in args.__dict__:\n if argument == \"log\":\n continue\n if args.__dict__[argument] is not None:\n cmd_args[argument] = args.__dict__[argument]\n return cmd_args\n\n\nasync def run_main():\n \"\"\"Run server async.\"\"\"\n cmd_args = get_commandline()\n cmd_args[\"http_port\"] = 8081\n cmd_args[\"json_file\"] = \"./pymodbus/server/simulator/setup.json\"\n task = ModbusSimulatorServer(**cmd_args)\n await task.run_forever()\n\n\ndef main():\n \"\"\"Run server.\"\"\"\n asyncio.run(run_main(), debug=True)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "pymodbus/server/simulator/main.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\"\"\"HTTP server for modbus simulator.\n\nThe modbus simulator contain 3 distint parts:\n\n- Datastore simulator, to define registers and their behaviour including actions: (simulator)(../../datastore/simulator.py)\n- Modbus server: (server)(./http_server.py)\n- HTTP server with REST API and web pages providing an online console in your browser\n\nMultiple setups for different server types and/or devices are prepared in a (json file)(./setup.json), the detailed configuration is explained in (doc)(README.md)\n\nThe command line parameters are kept to a minimum:\n\nusage: main.py [-h] [--modbus_server MODBUS_SERVER]\n [--modbus_device MODBUS_DEVICE] [--http_host HTTP_HOST]\n [--http_port HTTP_PORT]\n [--log {critical,error,warning,info,debug}]\n [--json_file JSON_FILE]\n [--custom_actions_module CUSTOM_ACTIONS_MODULE]\n\nModbus server with REST-API and web server\n\noptions:\n -h, --help show this help message and exit\n --modbus_server MODBUS_SERVER\n use <modbus_server> from server_list in json file\n --modbus_device MODBUS_DEVICE\n use <modbus_device> from device_list in json file\n --http_host HTTP_HOST\n use <http_host> as host to bind http listen\n --http_port HTTP_PORT\n use <http_port> as port to bind http listen\n --log {critical,error,warning,info,debug}\n set log level, default is info\n --log_file LOG_FILE\n name of server log file, default is \"server.log\"\n --json_file JSON_FILE\n name of json_file, default is \"setup.json\"\n --custom_actions_module CUSTOM_ACTIONS_MODULE\n python file with custom actions, default is none\n\"\"\"\nimport argparse\nimport asyncio\n\nfrom pymodbus import pymodbus_apply_logging_config\nfrom pymodbus.logging import Log\nfrom 
pymodbus.server.simulator.http_server import ModbusSimulatorServer\n\n\ndef get_commandline():\n \"\"\"Get command line arguments.\"\"\"\n parser = argparse.ArgumentParser(\n description=\"Modbus server with REST-API and web server\"\n )\n parser.add_argument(\n \"--modbus_server\",\n help=\"use <modbus_server> from server_list in json file\",\n type=str,\n )\n parser.add_argument(\n \"--modbus_device\",\n help=\"use <modbus_device> from device_list in json file\",\n type=str,\n )\n parser.add_argument(\n \"--http_host\",\n help=\"use <http_host> as host to bind http listen\",\n type=str,\n default=8081,\n )\n parser.add_argument(\n \"--http_port\",\n help=\"use <http_port> as port to bind http listen\",\n type=str,\n )\n parser.add_argument(\n \"--log\",\n choices=[\"critical\", \"error\", \"warning\", \"info\", \"debug\"],\n help=\"set log level, default is info\",\n default=\"info\",\n type=str,\n )\n parser.add_argument(\n \"--json_file\",\n help='name of json file, default is \"setup.json\"',\n type=str,\n default=\"./pymodbus/server/simulator/setup.json\",\n )\n parser.add_argument(\n \"--log_file\",\n help='name of server log file, default is \"server.log\"',\n type=str,\n )\n parser.add_argument(\n \"--custom_actions_module\",\n help=\"python file with custom actions, default is none\",\n type=str,\n )\n args = parser.parse_args()\n pymodbus_apply_logging_config(args.log.upper())\n Log.info(\"Start simulator\")\n cmd_args = {}\n for argument in args.__dict__:\n if argument == \"log\":\n continue\n if args.__dict__[argument] is not None:\n cmd_args[argument] = args.__dict__[argument]\n return cmd_args\n\n\nasync def run_main():\n \"\"\"Run server async.\"\"\"\n cmd_args = get_commandline()\n task = ModbusSimulatorServer(**cmd_args)\n await task.run_forever()\n\n\ndef main():\n \"\"\"Run server.\"\"\"\n asyncio.run(run_main(), debug=True)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "pymodbus/server/simulator/main.py"}]} | 2,667 | 253 |
gh_patches_debug_25843 | rasdani/github-patches | git_diff | fidals__shopelectro-965 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Create a custom page for 500 error
The page should have an apology and a suggestion to continue the purchase with the help of a consultant
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/urls.py`
Content:
```
1 from datetime import timedelta
2 from collections import OrderedDict
3
4 from django.conf import settings
5 from django.conf.urls import url, include
6 from django.conf.urls.static import static
7 from django.contrib.sitemaps.views import sitemap
8 from django.views.decorators.cache import cache_page, never_cache
9
10 from pages.views import RobotsView, SitemapPage
11 from pages.urls import custom_page_url
12
13 from shopelectro import sitemaps, views
14 from shopelectro.admin import se_admin
15
16
17 def cached_time(*args, **kwargs) -> int:
18 """Return value of time for caching in seconds."""
19 return int(timedelta(*args, **kwargs).total_seconds())
20
21
22 # Orders sitemaps instances
23 sitemaps = OrderedDict([
24 ('index', sitemaps.IndexSitemap),
25 ('category', sitemaps.CategorySitemap),
26 ('category-with-tags', sitemaps.CategoryWithTagsSitemap),
27 ('products', sitemaps.ProductSitemap),
28 ('site', sitemaps.PagesSitemap)
29 ])
30
31 # disable cache
32 if settings.DEBUG:
33 def cache_page(arg): # Ignore PyFlakesBear
34 if callable(arg):
35 return arg
36 return cache_page
37
38 cached_60d = cache_page(cached_time(days=60))
39 cached_2h = cache_page(cached_time(hours=2))
40
41 admin_urls = [
42 url(r'^', se_admin.urls),
43 url(r'^autocomplete/$', views.AdminAutocomplete.as_view(), name='admin_autocomplete'),
44 url(r'^get-tree-items/$', views.Tree.as_view()),
45 url(r'^redirect-to-product/$', views.RedirectToProduct.as_view()),
46 url(r'^table-editor-api/$', views.TableEditorAPI.as_view()),
47 url(r'^select2/', include('django_select2.urls')),
48 ]
49
50 catalog_urls = [
51 # "category" group
52 url(r'^categories/(?P<slug>[\w-]+)/$',
53 cached_2h(views.CategoryPage.as_view()), name='category'),
54 url(r'^categories/(?P<slug>[\w-]+)/tags/(?P<tags>[\w_-]+)/$',
55 cached_2h(views.CategoryPage.as_view()), name='category'),
56 url(r'^categories/(?P<slug>[\w-]+)/(?P<sorting>[0-9]*)/$',
57 views.CategoryPage.as_view(), name='category'),
58 url(r'^categories/(?P<slug>[\w-]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\w_-]+)/$',
59 views.CategoryPage.as_view(), name='category'),
60 # "load more" group
61 url(r'categories/(?P<slug>[\w-]+)/load-more/'
62 r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/$',
63 views.load_more, name='load_more'),
64 url(r'categories/(?P<slug>[\w-]+)/load-more/'
65 r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\w_-]+)/$',
66 views.load_more, name='load_more'),
67 # rest of urls
68 url(r'^no-images/$', views.ProductsWithoutImages.as_view(),
69 name='products_without_images'),
70 url(r'^no-text/$', views.ProductsWithoutText.as_view(),
71 name='products_without_text'),
72 url(r'^products/(?P<product_vendor_code>[0-9]+)/$',
73 views.ProductPage.as_view(), name='product'),
74 ]
75
76 service_urls = [
77 url(r'^ya-kassa/aviso/$', views.yandex_aviso, name='yandex_aviso'),
78 url(r'^ya-kassa/check/$', views.yandex_check, name='yandex_check'),
79 url(r'^ya-feedback/redirect/$',
80 views.ya_feedback_with_redirect, name='ya_feedback_with_redirect'),
81 url(r'^ya-feedback/request/$',
82 views.ya_feedback_request, name='ya_feedback_request'),
83 ]
84
85 search_urls = [
86 url(r'^autocomplete/$', views.Autocomplete.as_view(), name='autocomplete'),
87 ]
88
89 ecommerce_urls = [
90 url(r'^cart-get/$', never_cache(views.Cart.as_view()), name='cart_get'),
91 url(r'^cart-add/$', views.AddToCart.as_view(), name='cart_add'),
92 url(r'^cart-change/$', views.ChangeCount.as_view(), name='cart_set_count'),
93 url(r'^cart-flush/$', views.FlushCart.as_view(), name='cart_flush'),
94 url(r'^cart-remove/$', views.RemoveFromCart.as_view(), name='cart_remove'),
95 url(r'^order-call/$', views.order_call),
96 url(r'^one-click-buy/$', views.one_click_buy),
97 url(r'^yandex-order/$', views.YandexOrder.as_view()),
98 url(r'', include('ecommerce.urls')),
99 ]
100
101 custom_pages = [
102 # can't use just `r'^(?P<page>)$'` with no args to views, because reverse don't work
103 custom_page_url(r'^$', cached_2h(views.IndexPage.as_view()), {'page': ''}, name='index'),
104 custom_page_url(r'^(?P<page>robots\.txt)$', RobotsView.as_view()),
105 custom_page_url(r'^(?P<page>search)/$', views.Search.as_view()),
106 custom_page_url(r'^(?P<page>catalog)/$', cached_2h(views.category_matrix)),
107 custom_page_url(r'^(?P<page>sitemap)/$', SitemapPage.as_view()),
108 # these pages should show only actual state
109 custom_page_url(r'^shop/(?P<page>order)/$', never_cache(views.OrderPage.as_view())),
110 custom_page_url(r'^shop/(?P<page>order-success)/$', never_cache(views.OrderSuccess.as_view())),
111 ]
112
113 urlpatterns = [
114 url('', include(custom_pages)),
115 url(r'^admin/', include(admin_urls)),
116 url(r'^catalog/', include(catalog_urls)),
117 url(r'^pages/', include('pages.urls')),
118 url(r'^save-feedback/$', views.save_feedback),
119 url(r'^delete-feedback/$', views.delete_feedback),
120 url(r'^set-view-type/$', views.set_view_type, name='set_view_type'),
121 url(r'^shop/', include(ecommerce_urls)),
122 url(r'^search/', include(search_urls)),
123 url(r'^service/', include(service_urls)),
124 url(r'^sitemap\.xml$', cached_60d(sitemap), {'sitemaps': sitemaps}, name='sitemap'),
125 ]
126
127 if settings.DEBUG:
128 import debug_toolbar
129
130 urlpatterns += [
131 url(r'^__debug__/', include(debug_toolbar.urls)),
132 *static(settings.STATIC_URL, document_root=settings.STATIC_ROOT),
133 *static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT),
134 ]
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/shopelectro/urls.py b/shopelectro/urls.py
--- a/shopelectro/urls.py
+++ b/shopelectro/urls.py
@@ -1,15 +1,15 @@
-from datetime import timedelta
from collections import OrderedDict
+from datetime import timedelta
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib.sitemaps.views import sitemap
from django.views.decorators.cache import cache_page, never_cache
+from django.views.generic import TemplateView
-from pages.views import RobotsView, SitemapPage
from pages.urls import custom_page_url
-
+from pages.views import RobotsView, SitemapPage
from shopelectro import sitemaps, views
from shopelectro.admin import se_admin
@@ -132,3 +132,11 @@
*static(settings.STATIC_URL, document_root=settings.STATIC_ROOT),
*static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT),
]
+
+# Test and Debug environments replace real 404 and 500 error with stack traces.
+# We expose real 404 and 500 pages with separated urls to test them.
+if settings.TEST_ENV or settings.DEBUG:
+ urlpatterns += [
+ url(r'^404/$', TemplateView.as_view(template_name='404.html')),
+ url(r'^500/$', TemplateView.as_view(template_name='500.html')),
+ ]
| {"golden_diff": "diff --git a/shopelectro/urls.py b/shopelectro/urls.py\n--- a/shopelectro/urls.py\n+++ b/shopelectro/urls.py\n@@ -1,15 +1,15 @@\n-from datetime import timedelta\n from collections import OrderedDict\n+from datetime import timedelta\n \n from django.conf import settings\n from django.conf.urls import url, include\n from django.conf.urls.static import static\n from django.contrib.sitemaps.views import sitemap\n from django.views.decorators.cache import cache_page, never_cache\n+from django.views.generic import TemplateView\n \n-from pages.views import RobotsView, SitemapPage\n from pages.urls import custom_page_url\n-\n+from pages.views import RobotsView, SitemapPage\n from shopelectro import sitemaps, views\n from shopelectro.admin import se_admin\n \n@@ -132,3 +132,11 @@\n *static(settings.STATIC_URL, document_root=settings.STATIC_ROOT),\n *static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT),\n ]\n+\n+# Test and Debug environments replace real 404 and 500 error with stack traces.\n+# We expose real 404 and 500 pages with separated urls to test them.\n+if settings.TEST_ENV or settings.DEBUG:\n+ urlpatterns += [\n+ url(r'^404/$', TemplateView.as_view(template_name='404.html')),\n+ url(r'^500/$', TemplateView.as_view(template_name='500.html')),\n+ ]\n", "issue": "Create a custom page for 500 error\nThe page should have an apology and an suggestion to continue the purchase with the help of a consultant\n", "before_files": [{"content": "from datetime import timedelta\nfrom collections import OrderedDict\n\nfrom django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.conf.urls.static import static\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.views.decorators.cache import cache_page, never_cache\n\nfrom pages.views import RobotsView, SitemapPage\nfrom pages.urls import custom_page_url\n\nfrom shopelectro import sitemaps, views\nfrom shopelectro.admin import se_admin\n\n\ndef cached_time(*args, **kwargs) -> int:\n \"\"\"Return value of time for caching in seconds.\"\"\"\n return int(timedelta(*args, **kwargs).total_seconds())\n\n\n# Orders sitemaps instances\nsitemaps = OrderedDict([\n ('index', sitemaps.IndexSitemap),\n ('category', sitemaps.CategorySitemap),\n ('category-with-tags', sitemaps.CategoryWithTagsSitemap),\n ('products', sitemaps.ProductSitemap),\n ('site', sitemaps.PagesSitemap)\n])\n\n# disable cache\nif settings.DEBUG:\n def cache_page(arg): # Ignore PyFlakesBear\n if callable(arg):\n return arg\n return cache_page\n\ncached_60d = cache_page(cached_time(days=60))\ncached_2h = cache_page(cached_time(hours=2))\n\nadmin_urls = [\n url(r'^', se_admin.urls),\n url(r'^autocomplete/$', views.AdminAutocomplete.as_view(), name='admin_autocomplete'),\n url(r'^get-tree-items/$', views.Tree.as_view()),\n url(r'^redirect-to-product/$', views.RedirectToProduct.as_view()),\n url(r'^table-editor-api/$', views.TableEditorAPI.as_view()),\n url(r'^select2/', include('django_select2.urls')),\n]\n\ncatalog_urls = [\n # \"category\" group\n url(r'^categories/(?P<slug>[\\w-]+)/$',\n cached_2h(views.CategoryPage.as_view()), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/tags/(?P<tags>[\\w_-]+)/$',\n cached_2h(views.CategoryPage.as_view()), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/(?P<sorting>[0-9]*)/$',\n views.CategoryPage.as_view(), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\\w_-]+)/$',\n views.CategoryPage.as_view(), name='category'),\n # \"load more\" 
group\n url(r'categories/(?P<slug>[\\w-]+)/load-more/'\n r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/$',\n views.load_more, name='load_more'),\n url(r'categories/(?P<slug>[\\w-]+)/load-more/'\n r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\\w_-]+)/$',\n views.load_more, name='load_more'),\n # rest of urls\n url(r'^no-images/$', views.ProductsWithoutImages.as_view(),\n name='products_without_images'),\n url(r'^no-text/$', views.ProductsWithoutText.as_view(),\n name='products_without_text'),\n url(r'^products/(?P<product_vendor_code>[0-9]+)/$',\n views.ProductPage.as_view(), name='product'),\n]\n\nservice_urls = [\n url(r'^ya-kassa/aviso/$', views.yandex_aviso, name='yandex_aviso'),\n url(r'^ya-kassa/check/$', views.yandex_check, name='yandex_check'),\n url(r'^ya-feedback/redirect/$',\n views.ya_feedback_with_redirect, name='ya_feedback_with_redirect'),\n url(r'^ya-feedback/request/$',\n views.ya_feedback_request, name='ya_feedback_request'),\n]\n\nsearch_urls = [\n url(r'^autocomplete/$', views.Autocomplete.as_view(), name='autocomplete'),\n]\n\necommerce_urls = [\n url(r'^cart-get/$', never_cache(views.Cart.as_view()), name='cart_get'),\n url(r'^cart-add/$', views.AddToCart.as_view(), name='cart_add'),\n url(r'^cart-change/$', views.ChangeCount.as_view(), name='cart_set_count'),\n url(r'^cart-flush/$', views.FlushCart.as_view(), name='cart_flush'),\n url(r'^cart-remove/$', views.RemoveFromCart.as_view(), name='cart_remove'),\n url(r'^order-call/$', views.order_call),\n url(r'^one-click-buy/$', views.one_click_buy),\n url(r'^yandex-order/$', views.YandexOrder.as_view()),\n url(r'', include('ecommerce.urls')),\n]\n\ncustom_pages = [\n # can't use just `r'^(?P<page>)$'` with no args to views, because reverse don't work\n custom_page_url(r'^$', cached_2h(views.IndexPage.as_view()), {'page': ''}, name='index'),\n custom_page_url(r'^(?P<page>robots\\.txt)$', RobotsView.as_view()),\n custom_page_url(r'^(?P<page>search)/$', views.Search.as_view()),\n custom_page_url(r'^(?P<page>catalog)/$', cached_2h(views.category_matrix)),\n custom_page_url(r'^(?P<page>sitemap)/$', SitemapPage.as_view()),\n # these pages should show only actual state\n custom_page_url(r'^shop/(?P<page>order)/$', never_cache(views.OrderPage.as_view())),\n custom_page_url(r'^shop/(?P<page>order-success)/$', never_cache(views.OrderSuccess.as_view())),\n]\n\nurlpatterns = [\n url('', include(custom_pages)),\n url(r'^admin/', include(admin_urls)),\n url(r'^catalog/', include(catalog_urls)),\n url(r'^pages/', include('pages.urls')),\n url(r'^save-feedback/$', views.save_feedback),\n url(r'^delete-feedback/$', views.delete_feedback),\n url(r'^set-view-type/$', views.set_view_type, name='set_view_type'),\n url(r'^shop/', include(ecommerce_urls)),\n url(r'^search/', include(search_urls)),\n url(r'^service/', include(service_urls)),\n url(r'^sitemap\\.xml$', cached_60d(sitemap), {'sitemaps': sitemaps}, name='sitemap'),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n\n urlpatterns += [\n url(r'^__debug__/', include(debug_toolbar.urls)),\n *static(settings.STATIC_URL, document_root=settings.STATIC_ROOT),\n *static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT),\n ]\n", "path": "shopelectro/urls.py"}], "after_files": [{"content": "from collections import OrderedDict\nfrom datetime import timedelta\n\nfrom django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.conf.urls.static import static\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.views.decorators.cache import cache_page, 
never_cache\nfrom django.views.generic import TemplateView\n\nfrom pages.urls import custom_page_url\nfrom pages.views import RobotsView, SitemapPage\nfrom shopelectro import sitemaps, views\nfrom shopelectro.admin import se_admin\n\n\ndef cached_time(*args, **kwargs) -> int:\n \"\"\"Return value of time for caching in seconds.\"\"\"\n return int(timedelta(*args, **kwargs).total_seconds())\n\n\n# Orders sitemaps instances\nsitemaps = OrderedDict([\n ('index', sitemaps.IndexSitemap),\n ('category', sitemaps.CategorySitemap),\n ('category-with-tags', sitemaps.CategoryWithTagsSitemap),\n ('products', sitemaps.ProductSitemap),\n ('site', sitemaps.PagesSitemap)\n])\n\n# disable cache\nif settings.DEBUG:\n def cache_page(arg): # Ignore PyFlakesBear\n if callable(arg):\n return arg\n return cache_page\n\ncached_60d = cache_page(cached_time(days=60))\ncached_2h = cache_page(cached_time(hours=2))\n\nadmin_urls = [\n url(r'^', se_admin.urls),\n url(r'^autocomplete/$', views.AdminAutocomplete.as_view(), name='admin_autocomplete'),\n url(r'^get-tree-items/$', views.Tree.as_view()),\n url(r'^redirect-to-product/$', views.RedirectToProduct.as_view()),\n url(r'^table-editor-api/$', views.TableEditorAPI.as_view()),\n url(r'^select2/', include('django_select2.urls')),\n]\n\ncatalog_urls = [\n # \"category\" group\n url(r'^categories/(?P<slug>[\\w-]+)/$',\n cached_2h(views.CategoryPage.as_view()), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/tags/(?P<tags>[\\w_-]+)/$',\n cached_2h(views.CategoryPage.as_view()), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/(?P<sorting>[0-9]*)/$',\n views.CategoryPage.as_view(), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\\w_-]+)/$',\n views.CategoryPage.as_view(), name='category'),\n # \"load more\" group\n url(r'categories/(?P<slug>[\\w-]+)/load-more/'\n r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/$',\n views.load_more, name='load_more'),\n url(r'categories/(?P<slug>[\\w-]+)/load-more/'\n r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\\w_-]+)/$',\n views.load_more, name='load_more'),\n # rest of urls\n url(r'^no-images/$', views.ProductsWithoutImages.as_view(),\n name='products_without_images'),\n url(r'^no-text/$', views.ProductsWithoutText.as_view(),\n name='products_without_text'),\n url(r'^products/(?P<product_vendor_code>[0-9]+)/$',\n views.ProductPage.as_view(), name='product'),\n]\n\nservice_urls = [\n url(r'^ya-kassa/aviso/$', views.yandex_aviso, name='yandex_aviso'),\n url(r'^ya-kassa/check/$', views.yandex_check, name='yandex_check'),\n url(r'^ya-feedback/redirect/$',\n views.ya_feedback_with_redirect, name='ya_feedback_with_redirect'),\n url(r'^ya-feedback/request/$',\n views.ya_feedback_request, name='ya_feedback_request'),\n]\n\nsearch_urls = [\n url(r'^autocomplete/$', views.Autocomplete.as_view(), name='autocomplete'),\n]\n\necommerce_urls = [\n url(r'^cart-get/$', never_cache(views.Cart.as_view()), name='cart_get'),\n url(r'^cart-add/$', views.AddToCart.as_view(), name='cart_add'),\n url(r'^cart-change/$', views.ChangeCount.as_view(), name='cart_set_count'),\n url(r'^cart-flush/$', views.FlushCart.as_view(), name='cart_flush'),\n url(r'^cart-remove/$', views.RemoveFromCart.as_view(), name='cart_remove'),\n url(r'^order-call/$', views.order_call),\n url(r'^one-click-buy/$', views.one_click_buy),\n url(r'^yandex-order/$', views.YandexOrder.as_view()),\n url(r'', include('ecommerce.urls')),\n]\n\ncustom_pages = [\n # can't use just `r'^(?P<page>)$'` with no args to views, because 
reverse don't work\n custom_page_url(r'^$', cached_2h(views.IndexPage.as_view()), {'page': ''}, name='index'),\n custom_page_url(r'^(?P<page>robots\\.txt)$', RobotsView.as_view()),\n custom_page_url(r'^(?P<page>search)/$', views.Search.as_view()),\n custom_page_url(r'^(?P<page>catalog)/$', cached_2h(views.category_matrix)),\n custom_page_url(r'^(?P<page>sitemap)/$', SitemapPage.as_view()),\n # these pages should show only actual state\n custom_page_url(r'^shop/(?P<page>order)/$', never_cache(views.OrderPage.as_view())),\n custom_page_url(r'^shop/(?P<page>order-success)/$', never_cache(views.OrderSuccess.as_view())),\n]\n\nurlpatterns = [\n url('', include(custom_pages)),\n url(r'^admin/', include(admin_urls)),\n url(r'^catalog/', include(catalog_urls)),\n url(r'^pages/', include('pages.urls')),\n url(r'^save-feedback/$', views.save_feedback),\n url(r'^delete-feedback/$', views.delete_feedback),\n url(r'^set-view-type/$', views.set_view_type, name='set_view_type'),\n url(r'^shop/', include(ecommerce_urls)),\n url(r'^search/', include(search_urls)),\n url(r'^service/', include(service_urls)),\n url(r'^sitemap\\.xml$', cached_60d(sitemap), {'sitemaps': sitemaps}, name='sitemap'),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n\n urlpatterns += [\n url(r'^__debug__/', include(debug_toolbar.urls)),\n *static(settings.STATIC_URL, document_root=settings.STATIC_ROOT),\n *static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT),\n ]\n\n# Test and Debug environments replace real 404 and 500 error with stack traces.\n# We expose real 404 and 500 pages with separated urls to test them.\nif settings.TEST_ENV or settings.DEBUG:\n urlpatterns += [\n url(r'^404/$', TemplateView.as_view(template_name='404.html')),\n url(r'^500/$', TemplateView.as_view(template_name='500.html')),\n ]\n", "path": "shopelectro/urls.py"}]} | 2,018 | 324 |
gh_patches_debug_56718 | rasdani/github-patches | git_diff | mosaicml__composer-293 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ResNet56 default num_classes argument
## 🚀 Feature Request
The `num_classes` argument for [ResNet56_cifar10](https://github.com/mosaicml/composer/blob/main/composer/models/resnet56_cifar10/model.py) should have a default value `num_classes=10`.
## Motivation
It felt silly when writing a demo notebook to have to specify `num_classes=10` when calling `composer.models.CIFAR10_ResNet56(num_classes=10)`. The model has "cifar10" in its name, and even if it didn't, its most common use is for cifar10.
## Implementation
Does it require any changes beyond the `__init__()` signature?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `composer/models/resnet56_cifar10/model.py`
Content:
```
1 # Copyright 2021 MosaicML. All Rights Reserved.
2
3 from typing import List, Optional
4
5 from composer.models.base import MosaicClassifier
6 from composer.models.model_hparams import Initializer
7 from composer.models.resnets import CIFAR_ResNet
8
9
10 class CIFAR10_ResNet56(MosaicClassifier):
11 """A ResNet-56 model extending :class:`MosaicClassifier`.
12
13 See this `paper <https://arxiv.org/abs/1512.03385>`_ for details
14 on the residual network architecture.
15
16 Args:
17 num_classes (int): The number of classes for the model.
18 initializers (List[Initializer], optional): Initializers
19 for the model. ``None`` for no initialization.
20 (default: ``None``)
21 """
22
23 def __init__(
24 self,
25 num_classes: int,
26 initializers: Optional[List[Initializer]] = None,
27 ) -> None:
28 if initializers is None:
29 initializers = []
30
31 model = CIFAR_ResNet.get_model_from_name(
32 "cifar_resnet_56",
33 initializers,
34 num_classes,
35 )
36 super().__init__(module=model)
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/composer/models/resnet56_cifar10/model.py b/composer/models/resnet56_cifar10/model.py
--- a/composer/models/resnet56_cifar10/model.py
+++ b/composer/models/resnet56_cifar10/model.py
@@ -22,7 +22,7 @@
def __init__(
self,
- num_classes: int,
+ num_classes: int = 10,
initializers: Optional[List[Initializer]] = None,
) -> None:
if initializers is None:
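With the default applied as in the diff above, the call site quoted in the issue simplifies accordingly; a small usage sketch, assuming the public import path shown in the issue:

```python
from composer.models import CIFAR10_ResNet56

model = CIFAR10_ResNet56()                    # now equivalent to num_classes=10
cifar100 = CIFAR10_ResNet56(num_classes=100)  # the argument stays overridable
```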
| {"golden_diff": "diff --git a/composer/models/resnet56_cifar10/model.py b/composer/models/resnet56_cifar10/model.py\n--- a/composer/models/resnet56_cifar10/model.py\n+++ b/composer/models/resnet56_cifar10/model.py\n@@ -22,7 +22,7 @@\n \n def __init__(\n self,\n- num_classes: int,\n+ num_classes: int = 10,\n initializers: Optional[List[Initializer]] = None,\n ) -> None:\n if initializers is None:\n", "issue": "ResNet56 default num_classes argument\n## \ud83d\ude80 Feature Request\r\nThe `num_classes` argument for [ResNet56_cifar10](https://github.com/mosaicml/composer/blob/main/composer/models/resnet56_cifar10/model.py) should have a default value `num_classes=10`.\r\n\r\n## Motivation\r\n\r\nIt felt silly when writing a demo notebook to have to specify `num_classes=10` when calling `composer.models.CIFAR10_ResNet56(num_classes=10)`. The model has \"cifar10\" in its name, and even if it didn't, it's most common use is for cifar10.\r\n\r\n## Implementation\r\n\r\nDoes it require any changes beyond the `__init__()` signature?\n", "before_files": [{"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nfrom typing import List, Optional\n\nfrom composer.models.base import MosaicClassifier\nfrom composer.models.model_hparams import Initializer\nfrom composer.models.resnets import CIFAR_ResNet\n\n\nclass CIFAR10_ResNet56(MosaicClassifier):\n \"\"\"A ResNet-56 model extending :class:`MosaicClassifier`.\n\n See this `paper <https://arxiv.org/abs/1512.03385>`_ for details\n on the residual network architecture.\n\n Args:\n num_classes (int): The number of classes for the model.\n initializers (List[Initializer], optional): Initializers\n for the model. ``None`` for no initialization.\n (default: ``None``)\n \"\"\"\n\n def __init__(\n self,\n num_classes: int,\n initializers: Optional[List[Initializer]] = None,\n ) -> None:\n if initializers is None:\n initializers = []\n\n model = CIFAR_ResNet.get_model_from_name(\n \"cifar_resnet_56\",\n initializers,\n num_classes,\n )\n super().__init__(module=model)\n", "path": "composer/models/resnet56_cifar10/model.py"}], "after_files": [{"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nfrom typing import List, Optional\n\nfrom composer.models.base import MosaicClassifier\nfrom composer.models.model_hparams import Initializer\nfrom composer.models.resnets import CIFAR_ResNet\n\n\nclass CIFAR10_ResNet56(MosaicClassifier):\n \"\"\"A ResNet-56 model extending :class:`MosaicClassifier`.\n\n See this `paper <https://arxiv.org/abs/1512.03385>`_ for details\n on the residual network architecture.\n\n Args:\n num_classes (int): The number of classes for the model.\n initializers (List[Initializer], optional): Initializers\n for the model. ``None`` for no initialization.\n (default: ``None``)\n \"\"\"\n\n def __init__(\n self,\n num_classes: int = 10,\n initializers: Optional[List[Initializer]] = None,\n ) -> None:\n if initializers is None:\n initializers = []\n\n model = CIFAR_ResNet.get_model_from_name(\n \"cifar_resnet_56\",\n initializers,\n num_classes,\n )\n super().__init__(module=model)\n", "path": "composer/models/resnet56_cifar10/model.py"}]} | 756 | 127 |
gh_patches_debug_1114 | rasdani/github-patches | git_diff | Pylons__pyramid-2224 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update to Sphinx 1.3.4 when released
There is a [bug in Sphinx 1.3.3 and 1.3.1](https://github.com/sphinx-doc/sphinx/issues/2189) (I haven't tried 1.3.2) where next and previous links in Sphinx documentation are broken when going into children and across sibling directories.
When 1.3.4 is released, we need to pin sphinx to 1.3.4, which will include the commit made 8 days after the 1.3.3 release.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 ##############################################################################
2 #
3 # Copyright (c) 2008-2013 Agendaless Consulting and Contributors.
4 # All Rights Reserved.
5 #
6 # This software is subject to the provisions of the BSD-like license at
7 # http://www.repoze.org/LICENSE.txt. A copy of the license should accompany
8 # this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL
9 # EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,
10 # THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND
11 # FITNESS FOR A PARTICULAR PURPOSE
12 #
13 ##############################################################################
14
15 import os
16 import sys
17
18 from setuptools import setup, find_packages
19
20 py_version = sys.version_info[:2]
21
22 PY3 = py_version[0] == 3
23
24 if PY3:
25 if py_version < (3, 2):
26 raise RuntimeError('On Python 3, Pyramid requires Python 3.2 or better')
27 else:
28 if py_version < (2, 6):
29 raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better')
30
31 here = os.path.abspath(os.path.dirname(__file__))
32 try:
33 with open(os.path.join(here, 'README.rst')) as f:
34 README = f.read()
35 with open(os.path.join(here, 'CHANGES.txt')) as f:
36 CHANGES = f.read()
37 except IOError:
38 README = CHANGES = ''
39
40 install_requires=[
41 'setuptools',
42 'WebOb >= 1.3.1', # request.domain and CookieProfile
43 'repoze.lru >= 0.4', # py3 compat
44 'zope.interface >= 3.8.0', # has zope.interface.registry
45 'zope.deprecation >= 3.5.0', # py3 compat
46 'venusian >= 1.0a3', # ``ignore``
47 'translationstring >= 0.4', # py3 compat
48 'PasteDeploy >= 1.5.0', # py3 compat
49 ]
50
51 tests_require = [
52 'WebTest >= 1.3.1', # py3 compat
53 ]
54
55 if not PY3:
56 tests_require.append('zope.component>=3.11.0')
57
58 docs_extras = [
59 'Sphinx >= 1.3.1',
60 'docutils',
61 'repoze.sphinx.autointerface',
62 'pylons_sphinx_latesturl',
63 'pylons-sphinx-themes',
64 'sphinxcontrib-programoutput',
65 ]
66
67 testing_extras = tests_require + [
68 'nose',
69 'coverage',
70 'virtualenv', # for scaffolding tests
71 ]
72
73 setup(name='pyramid',
74 version='1.7.dev0',
75 description='The Pyramid Web Framework, a Pylons project',
76 long_description=README + '\n\n' + CHANGES,
77 classifiers=[
78 "Development Status :: 6 - Mature",
79 "Intended Audience :: Developers",
80 "Programming Language :: Python",
81 "Programming Language :: Python :: 2.6",
82 "Programming Language :: Python :: 2.7",
83 "Programming Language :: Python :: 3",
84 "Programming Language :: Python :: 3.2",
85 "Programming Language :: Python :: 3.3",
86 "Programming Language :: Python :: 3.4",
87 "Programming Language :: Python :: 3.5",
88 "Programming Language :: Python :: Implementation :: CPython",
89 "Programming Language :: Python :: Implementation :: PyPy",
90 "Framework :: Pyramid",
91 "Topic :: Internet :: WWW/HTTP",
92 "Topic :: Internet :: WWW/HTTP :: WSGI",
93 "License :: Repoze Public License",
94 ],
95 keywords='web wsgi pylons pyramid',
96 author="Chris McDonough, Agendaless Consulting",
97 author_email="[email protected]",
98 url="http://docs.pylonsproject.org/en/latest/docs/pyramid.html",
99 license="BSD-derived (http://www.repoze.org/LICENSE.txt)",
100 packages=find_packages(),
101 include_package_data=True,
102 zip_safe=False,
103 install_requires = install_requires,
104 extras_require = {
105 'testing':testing_extras,
106 'docs':docs_extras,
107 },
108 tests_require = tests_require,
109 test_suite="pyramid.tests",
110 entry_points = """\
111 [pyramid.scaffold]
112 starter=pyramid.scaffolds:StarterProjectTemplate
113 zodb=pyramid.scaffolds:ZODBProjectTemplate
114 alchemy=pyramid.scaffolds:AlchemyProjectTemplate
115 [pyramid.pshell_runner]
116 python=pyramid.scripts.pshell:python_shell_runner
117 [console_scripts]
118 pcreate = pyramid.scripts.pcreate:main
119 pserve = pyramid.scripts.pserve:main
120 pshell = pyramid.scripts.pshell:main
121 proutes = pyramid.scripts.proutes:main
122 pviews = pyramid.scripts.pviews:main
123 ptweens = pyramid.scripts.ptweens:main
124 prequest = pyramid.scripts.prequest:main
125 pdistreport = pyramid.scripts.pdistreport:main
126 [paste.server_runner]
127 wsgiref = pyramid.scripts.pserve:wsgiref_server_runner
128 cherrypy = pyramid.scripts.pserve:cherrypy_server_runner
129 """
130 )
131
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -56,7 +56,7 @@
tests_require.append('zope.component>=3.11.0')
docs_extras = [
- 'Sphinx >= 1.3.1',
+ 'Sphinx >= 1.3.4',
'docutils',
'repoze.sphinx.autointerface',
'pylons_sphinx_latesturl',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -56,7 +56,7 @@\n tests_require.append('zope.component>=3.11.0')\n \n docs_extras = [\n- 'Sphinx >= 1.3.1',\n+ 'Sphinx >= 1.3.4',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n", "issue": "Update to Sphinx 1.3.4 when released\nThere is a [bug in Sphinx 1.3.3 and 1.3.1](https://github.com/sphinx-doc/sphinx/issues/2189) (I haven't tried 1.3.2) where next and previous links in Sphinx documentation are broken when going into children and across sibling directories.\n\nWhen 1.3.4 is released, we need to pin sphinx to 1.3.4, which will include the commit made 8 days after the 1.3.3 release.\n\n", "before_files": [{"content": "##############################################################################\n#\n# Copyright (c) 2008-2013 Agendaless Consulting and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the BSD-like license at\n# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany\n# this distribution. THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL\n# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND\n# FITNESS FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\npy_version = sys.version_info[:2]\n\nPY3 = py_version[0] == 3\n\nif PY3:\n if py_version < (3, 2):\n raise RuntimeError('On Python 3, Pyramid requires Python 3.2 or better')\nelse:\n if py_version < (2, 6):\n raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better')\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n with open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n with open(os.path.join(here, 'CHANGES.txt')) as f:\n CHANGES = f.read()\nexcept IOError:\n README = CHANGES = ''\n\ninstall_requires=[\n 'setuptools',\n 'WebOb >= 1.3.1', # request.domain and CookieProfile\n 'repoze.lru >= 0.4', # py3 compat\n 'zope.interface >= 3.8.0', # has zope.interface.registry\n 'zope.deprecation >= 3.5.0', # py3 compat\n 'venusian >= 1.0a3', # ``ignore``\n 'translationstring >= 0.4', # py3 compat\n 'PasteDeploy >= 1.5.0', # py3 compat\n ]\n\ntests_require = [\n 'WebTest >= 1.3.1', # py3 compat\n ]\n\nif not PY3:\n tests_require.append('zope.component>=3.11.0')\n\ndocs_extras = [\n 'Sphinx >= 1.3.1',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n 'pylons-sphinx-themes',\n 'sphinxcontrib-programoutput',\n ]\n\ntesting_extras = tests_require + [\n 'nose',\n 'coverage',\n 'virtualenv', # for scaffolding tests\n ]\n\nsetup(name='pyramid',\n version='1.7.dev0',\n description='The Pyramid Web Framework, a Pylons project',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Development Status :: 6 - Mature\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.2\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Framework :: Pyramid\",\n 
\"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI\",\n \"License :: Repoze Public License\",\n ],\n keywords='web wsgi pylons pyramid',\n author=\"Chris McDonough, Agendaless Consulting\",\n author_email=\"[email protected]\",\n url=\"http://docs.pylonsproject.org/en/latest/docs/pyramid.html\",\n license=\"BSD-derived (http://www.repoze.org/LICENSE.txt)\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires = install_requires,\n extras_require = {\n 'testing':testing_extras,\n 'docs':docs_extras,\n },\n tests_require = tests_require,\n test_suite=\"pyramid.tests\",\n entry_points = \"\"\"\\\n [pyramid.scaffold]\n starter=pyramid.scaffolds:StarterProjectTemplate\n zodb=pyramid.scaffolds:ZODBProjectTemplate\n alchemy=pyramid.scaffolds:AlchemyProjectTemplate\n [pyramid.pshell_runner]\n python=pyramid.scripts.pshell:python_shell_runner\n [console_scripts]\n pcreate = pyramid.scripts.pcreate:main\n pserve = pyramid.scripts.pserve:main\n pshell = pyramid.scripts.pshell:main\n proutes = pyramid.scripts.proutes:main\n pviews = pyramid.scripts.pviews:main\n ptweens = pyramid.scripts.ptweens:main\n prequest = pyramid.scripts.prequest:main\n pdistreport = pyramid.scripts.pdistreport:main\n [paste.server_runner]\n wsgiref = pyramid.scripts.pserve:wsgiref_server_runner\n cherrypy = pyramid.scripts.pserve:cherrypy_server_runner\n \"\"\"\n )\n\n", "path": "setup.py"}], "after_files": [{"content": "##############################################################################\n#\n# Copyright (c) 2008-2013 Agendaless Consulting and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the BSD-like license at\n# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany\n# this distribution. 
THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL\n# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND\n# FITNESS FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\npy_version = sys.version_info[:2]\n\nPY3 = py_version[0] == 3\n\nif PY3:\n if py_version < (3, 2):\n raise RuntimeError('On Python 3, Pyramid requires Python 3.2 or better')\nelse:\n if py_version < (2, 6):\n raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better')\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n with open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n with open(os.path.join(here, 'CHANGES.txt')) as f:\n CHANGES = f.read()\nexcept IOError:\n README = CHANGES = ''\n\ninstall_requires=[\n 'setuptools',\n 'WebOb >= 1.3.1', # request.domain and CookieProfile\n 'repoze.lru >= 0.4', # py3 compat\n 'zope.interface >= 3.8.0', # has zope.interface.registry\n 'zope.deprecation >= 3.5.0', # py3 compat\n 'venusian >= 1.0a3', # ``ignore``\n 'translationstring >= 0.4', # py3 compat\n 'PasteDeploy >= 1.5.0', # py3 compat\n ]\n\ntests_require = [\n 'WebTest >= 1.3.1', # py3 compat\n ]\n\nif not PY3:\n tests_require.append('zope.component>=3.11.0')\n\ndocs_extras = [\n 'Sphinx >= 1.3.4',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n 'pylons-sphinx-themes',\n 'sphinxcontrib-programoutput',\n ]\n\ntesting_extras = tests_require + [\n 'nose',\n 'coverage',\n 'virtualenv', # for scaffolding tests\n ]\n\nsetup(name='pyramid',\n version='1.7.dev0',\n description='The Pyramid Web Framework, a Pylons project',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Development Status :: 6 - Mature\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.2\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Framework :: Pyramid\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI\",\n \"License :: Repoze Public License\",\n ],\n keywords='web wsgi pylons pyramid',\n author=\"Chris McDonough, Agendaless Consulting\",\n author_email=\"[email protected]\",\n url=\"http://docs.pylonsproject.org/en/latest/docs/pyramid.html\",\n license=\"BSD-derived (http://www.repoze.org/LICENSE.txt)\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires = install_requires,\n extras_require = {\n 'testing':testing_extras,\n 'docs':docs_extras,\n },\n tests_require = tests_require,\n test_suite=\"pyramid.tests\",\n entry_points = \"\"\"\\\n [pyramid.scaffold]\n starter=pyramid.scaffolds:StarterProjectTemplate\n zodb=pyramid.scaffolds:ZODBProjectTemplate\n alchemy=pyramid.scaffolds:AlchemyProjectTemplate\n [pyramid.pshell_runner]\n python=pyramid.scripts.pshell:python_shell_runner\n [console_scripts]\n pcreate = pyramid.scripts.pcreate:main\n pserve = pyramid.scripts.pserve:main\n pshell = pyramid.scripts.pshell:main\n proutes = pyramid.scripts.proutes:main\n 
pviews = pyramid.scripts.pviews:main\n ptweens = pyramid.scripts.ptweens:main\n prequest = pyramid.scripts.prequest:main\n pdistreport = pyramid.scripts.pdistreport:main\n [paste.server_runner]\n wsgiref = pyramid.scripts.pserve:wsgiref_server_runner\n cherrypy = pyramid.scripts.pserve:cherrypy_server_runner\n \"\"\"\n )\n\n", "path": "setup.py"}]} | 1,823 | 106 |
gh_patches_debug_26038 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-473 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AW Harburg throws an error
With 1.28.0 fetching the calendar for aw_harburg_de stopped working.
The last working version was probably around 1.26.x (some intermediate updates were skipped).
config:
```
waste_collection_schedule:
sources:
- name: aw_harburg_de
args:
level_1: "XXX"
level_2: "YYY"
customize:
- type: Hausmüll 14-täglich
alias: "waste"
icon: mdi:trash-can
- type: Gelber Sack
alias: "recycle"
icon: mdi:recycle
- type: Altpapier
alias: "paper"
icon: mdi:trash-can-outline
fetch_time: "04:00"
day_switch_time: "10:00"
```
level_1 and level_2 are set to real levels according to the website and were not changed.
Please find the log below:
```
Logger: waste_collection_schedule.scraper
Source: custom_components/waste_collection_schedule/waste_collection_schedule/scraper.py:143
Integration: waste_collection_schedule ([documentation](https://github.com/mampfes/hacs_waste_collection_schedule#readme))
First occurred: 17:00:41 (1 occurrences)
Last logged: 17:00:41
fetch failed for source AW Harburg: Traceback (most recent call last): File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/scraper.py", line 141, in fetch entries = self._source.fetch() File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py", line 108, in fetch dates = self._ics.convert(r.text) File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/service/ICS.py", line 38, in convert events: List[Any] = icalevents.events( File "/usr/local/lib/python3.10/site-packages/icalevents/icalevents.py", line 50, in events found_events += parse_events(content, start=start, end=end) File "/usr/local/lib/python3.10/site-packages/icalevents/icalparser.py", line 250, in parse_events raise ValueError('Content is invalid!') ValueError: Content is invalid!
```
--- END ISSUE ---
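The traceback shows `icalevents` raising `ValueError('Content is invalid!')` from `parse_events`, i.e. the downloaded iCal text could not be parsed as a calendar. A defensive pattern around the conversion step, written here only as a generic sketch (the `ics.convert` call mirrors the `ICS` helper used by the source below; it is not the project's actual fix):

```python
from typing import Any, List


def safe_convert(ics, text: str) -> List[Any]:
    """Convert an iCal payload, treating invalid or empty content as 'no events'."""
    try:
        return ics.convert(text)
    except ValueError:  # icalevents: "Content is invalid!"
        return []
```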
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py`
Content:
```
1 import requests
2 from bs4 import BeautifulSoup
3 from waste_collection_schedule import Collection # type: ignore[attr-defined]
4 from waste_collection_schedule.service.ICS import ICS
5
6 TITLE = "AW Harburg"
7 DESCRIPTION = "Abfallwirtschaft Landkreis Harburg"
8 URL = "https://www.landkreis-harburg.de/bauen-umwelt/abfallwirtschaft/abfallkalender/"
9
10 TEST_CASES = {
11 "CityWithTwoLevels": {"level_1": "Hanstedt", "level_2": "Evendorf"},
12 "CityWithThreeLevels": {
13 "level_1": "Buchholz",
14 "level_2": "Buchholz mit Steinbeck (ohne Reindorf)",
15 "level_3": "Seppenser Mühlenweg Haus-Nr. 1 / 2",
16 },
17 }
18
19 HEADERS = {
20 "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64)",
21 }
22
23
24 class Source:
25 def __init__(self, level_1, level_2, level_3=None):
26 self._districts = [level_1, level_2, level_3]
27 self._ics = ICS()
28
29 def fetch(self):
30 # Use a session to keep cookies and stuff
31 session = requests.Session()
32
33 # Get the IDs of the districts on the first level
34 # Double loading is on purpose because sometimes the webpage has an overlay
35 # which is gone on the second try in a session
36 r = session.get(URL, headers=HEADERS)
37 if "Zur aufgerufenen Seite" in r.text:
38 r = session.get(URL, headers=HEADERS)
39 if r.status_code != 200:
40 raise Exception(f"Error: failed to fetch first url: {URL}")
41
42 # Get the IDs of the districts on the first level
43 id = self.parse_level(r.text, 1)
44
45 # Get the IDs of the districts on the second level
46 url = (
47 "https://www.landkreis-harburg.de/ajax/abfall_gebiete_struktur_select.html"
48 )
49 params = {
50 "parent": id,
51 "ebene": 1,
52 "portal": 1,
53 "selected_ebene": 0,
54 }
55 r = session.get(url, params=params, headers=HEADERS)
56 if r.status_code != 200:
57 raise Exception(f"Error: failed to fetch second url: {url}")
58
59 # Get the IDs of the districts on the second level
60 id = self.parse_level(r.text, 2)
61
62 # Get the IDs of the third level - if applicable
63 if self._districts[3 - 1] is not None:
64 # Get the IDs of the districts on the third level
65 params = {
66 "parent": id,
67 "ebene": 2,
68 "portal": 1,
69 "selected_ebene": 0,
70 }
71 r = session.get(url, params=params, headers=HEADERS)
72 if r.status_code != 200:
73 raise Exception(f"Error: failed to fetch third url: {url}")
74
75 # Get the IDs of the districts on the third level
76 id = self.parse_level(r.text, 3)
77
78 # Prepare data for the real web request
79 url = "https://www.landkreis-harburg.de/abfallkalender/abfallkalender_struktur_daten_suche.html"
80 params = {
81 "selected_ebene": id,
82 "owner": 20100,
83 }
84 r = session.get(url, params=params, headers=HEADERS)
85
86 # Sometimes there is no garbage calendar available
87 if "Es sind keine Abfuhrbezirke hinterlegt." in r.text:
88 raise Exception(
89 f'Error: "Es sind keine Abfuhrbezirke hinterlegt." for "{self._districts[3-1]}". Please use different input data.'
90 )
91
92 soup = BeautifulSoup(r.text, features="html.parser")
93 links = soup.find_all("a")
94 ical_url = ""
95 for any_link in links:
96 if " als iCal" in any_link.text:
97 ical_url = any_link.get("href")
98
99 if "ical.html" not in ical_url:
100 raise Exception("No ical Link in the result: " + str(links))
101
102 # Get the final data
103 r = requests.get(ical_url, headers=HEADERS)
104 if not r.ok:
105 raise Exception(f"Error: failed to fetch url: {ical_url}")
106
107 # Parse ics file
108 dates = self._ics.convert(r.text)
109
110 entries = []
111 for d in dates:
112 entries.append(Collection(d[0], d[1]))
113 return entries
114
115 def parse_level(self, response, level):
116 soup = BeautifulSoup(response, features="html.parser")
117 select_content = soup.find_all("select", id=f"strukturEbene{level}")
118 soup = BeautifulSoup(str(select_content), features="html.parser")
119 options_content = soup.find_all("option")
120 level_ids = {}
121 for option in options_content:
122 # Ignore the "Bitte wählen..."
123 if option.get("value") != "0":
124 level_ids[option.text] = option.get("value")
125
126 if level_ids == {}:
127 raise Exception(f"Error: Level {level} Dictionary empty")
128
129 if self._districts[level - 1] not in level_ids:
130 raise Exception(
131 f"Error: District {self._districts[level]} is not in the dictionary: {level_ids}"
132 )
133
134 return level_ids[self._districts[level - 1]]
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py
@@ -91,25 +91,26 @@
soup = BeautifulSoup(r.text, features="html.parser")
links = soup.find_all("a")
- ical_url = ""
+ ical_urls = []
for any_link in links:
if " als iCal" in any_link.text:
- ical_url = any_link.get("href")
-
- if "ical.html" not in ical_url:
- raise Exception("No ical Link in the result: " + str(links))
-
- # Get the final data
- r = requests.get(ical_url, headers=HEADERS)
- if not r.ok:
- raise Exception(f"Error: failed to fetch url: {ical_url}")
-
- # Parse ics file
- dates = self._ics.convert(r.text)
+ # multiple links occur during year transition
+ ical_urls.append(any_link.get("href"))
+ # Get the final data for all links
entries = []
- for d in dates:
- entries.append(Collection(d[0], d[1]))
+ for ical_url in ical_urls:
+ r = requests.get(ical_url, headers=HEADERS)
+ r.raise_for_status()
+
+ # Parse ics file
+ try:
+ dates = self._ics.convert(r.text)
+
+ for d in dates:
+ entries.append(Collection(d[0], d[1]))
+ except ValueError:
+ pass # during year transition the ical for the next year may be empty
return entries
def parse_level(self, response, level):
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py\n@@ -91,25 +91,26 @@\n \n soup = BeautifulSoup(r.text, features=\"html.parser\")\n links = soup.find_all(\"a\")\n- ical_url = \"\"\n+ ical_urls = []\n for any_link in links:\n if \" als iCal\" in any_link.text:\n- ical_url = any_link.get(\"href\")\n-\n- if \"ical.html\" not in ical_url:\n- raise Exception(\"No ical Link in the result: \" + str(links))\n-\n- # Get the final data\n- r = requests.get(ical_url, headers=HEADERS)\n- if not r.ok:\n- raise Exception(f\"Error: failed to fetch url: {ical_url}\")\n-\n- # Parse ics file\n- dates = self._ics.convert(r.text)\n+ # multiple links occur during year transition\n+ ical_urls.append(any_link.get(\"href\"))\n \n+ # Get the final data for all links\n entries = []\n- for d in dates:\n- entries.append(Collection(d[0], d[1]))\n+ for ical_url in ical_urls:\n+ r = requests.get(ical_url, headers=HEADERS)\n+ r.raise_for_status()\n+\n+ # Parse ics file\n+ try:\n+ dates = self._ics.convert(r.text)\n+\n+ for d in dates:\n+ entries.append(Collection(d[0], d[1]))\n+ except ValueError:\n+ pass # during year transition the ical for the next year may be empty\n return entries\n \n def parse_level(self, response, level):\n", "issue": "AW Harburg throws\nWith 1.28.0 fetching the calendar for aw_harburg_de stopped working.\r\nLast working version probably was 1.26.x +- ... (skipped some updates)\r\nconfig:```\r\n```\r\n\r\nwaste_collection_schedule:\r\n sources:\r\n - name: aw_harburg_de\r\n args:\r\n level_1: \"XXX\"\r\n level_2: \"YYY\"\r\n customize:\r\n - type: Hausm\u00fcll 14-t\u00e4glich\r\n alias: \"waste\"\r\n icon: mdi:trash-can\r\n - type: Gelber Sack\r\n alias: \"recycle\"\r\n icon: mdi:recycle\r\n - type: Altpapier\r\n alias: \"paper\"\r\n icon: mdi:trash-can-outline\r\n fetch_time: \"04:00\"\r\n day_switch_time: \"10:00\"\r\n```\r\nlevel_1 + 2 are set to real levels according to website and were not changed\r\nPls find log attached\r\n```\r\n\r\nLogger: waste_collection_schedule.scraper\r\nSource: custom_components/waste_collection_schedule/waste_collection_schedule/scraper.py:143\r\nIntegration: waste_collection_schedule ([documentation](https://github.com/mampfes/hacs_waste_collection_schedule#readme))\r\nFirst occurred: 17:00:41 (1 occurrences)\r\nLast logged: 17:00:41\r\nfetch failed for source AW Harburg: Traceback (most recent call last): File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/scraper.py\", line 141, in fetch entries = self._source.fetch() File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py\", line 108, in fetch dates = self._ics.convert(r.text) File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/service/ICS.py\", line 38, in convert events: List[Any] = icalevents.events( File \"/usr/local/lib/python3.10/site-packages/icalevents/icalevents.py\", line 50, in events found_events += parse_events(content, start=start, end=end) File \"/usr/local/lib/python3.10/site-packages/icalevents/icalparser.py\", line 250, in parse_events raise ValueError('Content is invalid!') ValueError: Content is 
invalid!\r\n```\n", "before_files": [{"content": "import requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"AW Harburg\"\nDESCRIPTION = \"Abfallwirtschaft Landkreis Harburg\"\nURL = \"https://www.landkreis-harburg.de/bauen-umwelt/abfallwirtschaft/abfallkalender/\"\n\nTEST_CASES = {\n \"CityWithTwoLevels\": {\"level_1\": \"Hanstedt\", \"level_2\": \"Evendorf\"},\n \"CityWithThreeLevels\": {\n \"level_1\": \"Buchholz\",\n \"level_2\": \"Buchholz mit Steinbeck (ohne Reindorf)\",\n \"level_3\": \"Seppenser M\u00fchlenweg Haus-Nr. 1 / 2\",\n },\n}\n\nHEADERS = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; Win64; x64)\",\n}\n\n\nclass Source:\n def __init__(self, level_1, level_2, level_3=None):\n self._districts = [level_1, level_2, level_3]\n self._ics = ICS()\n\n def fetch(self):\n # Use a session to keep cookies and stuff\n session = requests.Session()\n\n # Get the IDs of the districts on the first level\n # Double loading is on purpose because sometimes the webpage has an overlay\n # which is gone on the second try in a session\n r = session.get(URL, headers=HEADERS)\n if \"Zur aufgerufenen Seite\" in r.text:\n r = session.get(URL, headers=HEADERS)\n if r.status_code != 200:\n raise Exception(f\"Error: failed to fetch first url: {URL}\")\n\n # Get the IDs of the districts on the first level\n id = self.parse_level(r.text, 1)\n\n # Get the IDs of the districts on the second level\n url = (\n \"https://www.landkreis-harburg.de/ajax/abfall_gebiete_struktur_select.html\"\n )\n params = {\n \"parent\": id,\n \"ebene\": 1,\n \"portal\": 1,\n \"selected_ebene\": 0,\n }\n r = session.get(url, params=params, headers=HEADERS)\n if r.status_code != 200:\n raise Exception(f\"Error: failed to fetch second url: {url}\")\n\n # Get the IDs of the districts on the second level\n id = self.parse_level(r.text, 2)\n\n # Get the IDs of the third level - if applicable\n if self._districts[3 - 1] is not None:\n # Get the IDs of the districts on the third level\n params = {\n \"parent\": id,\n \"ebene\": 2,\n \"portal\": 1,\n \"selected_ebene\": 0,\n }\n r = session.get(url, params=params, headers=HEADERS)\n if r.status_code != 200:\n raise Exception(f\"Error: failed to fetch third url: {url}\")\n\n # Get the IDs of the districts on the third level\n id = self.parse_level(r.text, 3)\n\n # Prepare data for the real web request\n url = \"https://www.landkreis-harburg.de/abfallkalender/abfallkalender_struktur_daten_suche.html\"\n params = {\n \"selected_ebene\": id,\n \"owner\": 20100,\n }\n r = session.get(url, params=params, headers=HEADERS)\n\n # Sometimes there is no garbage calendar available\n if \"Es sind keine Abfuhrbezirke hinterlegt.\" in r.text:\n raise Exception(\n f'Error: \"Es sind keine Abfuhrbezirke hinterlegt.\" for \"{self._districts[3-1]}\". 
Please use different input data.'\n )\n\n soup = BeautifulSoup(r.text, features=\"html.parser\")\n links = soup.find_all(\"a\")\n ical_url = \"\"\n for any_link in links:\n if \" als iCal\" in any_link.text:\n ical_url = any_link.get(\"href\")\n\n if \"ical.html\" not in ical_url:\n raise Exception(\"No ical Link in the result: \" + str(links))\n\n # Get the final data\n r = requests.get(ical_url, headers=HEADERS)\n if not r.ok:\n raise Exception(f\"Error: failed to fetch url: {ical_url}\")\n\n # Parse ics file\n dates = self._ics.convert(r.text)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n return entries\n\n def parse_level(self, response, level):\n soup = BeautifulSoup(response, features=\"html.parser\")\n select_content = soup.find_all(\"select\", id=f\"strukturEbene{level}\")\n soup = BeautifulSoup(str(select_content), features=\"html.parser\")\n options_content = soup.find_all(\"option\")\n level_ids = {}\n for option in options_content:\n # Ignore the \"Bitte w\u00e4hlen...\"\n if option.get(\"value\") != \"0\":\n level_ids[option.text] = option.get(\"value\")\n\n if level_ids == {}:\n raise Exception(f\"Error: Level {level} Dictionary empty\")\n\n if self._districts[level - 1] not in level_ids:\n raise Exception(\n f\"Error: District {self._districts[level]} is not in the dictionary: {level_ids}\"\n )\n\n return level_ids[self._districts[level - 1]]\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py"}], "after_files": [{"content": "import requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"AW Harburg\"\nDESCRIPTION = \"Abfallwirtschaft Landkreis Harburg\"\nURL = \"https://www.landkreis-harburg.de/bauen-umwelt/abfallwirtschaft/abfallkalender/\"\n\nTEST_CASES = {\n \"CityWithTwoLevels\": {\"level_1\": \"Hanstedt\", \"level_2\": \"Evendorf\"},\n \"CityWithThreeLevels\": {\n \"level_1\": \"Buchholz\",\n \"level_2\": \"Buchholz mit Steinbeck (ohne Reindorf)\",\n \"level_3\": \"Seppenser M\u00fchlenweg Haus-Nr. 
1 / 2\",\n },\n}\n\nHEADERS = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; Win64; x64)\",\n}\n\n\nclass Source:\n def __init__(self, level_1, level_2, level_3=None):\n self._districts = [level_1, level_2, level_3]\n self._ics = ICS()\n\n def fetch(self):\n # Use a session to keep cookies and stuff\n session = requests.Session()\n\n # Get the IDs of the districts on the first level\n # Double loading is on purpose because sometimes the webpage has an overlay\n # which is gone on the second try in a session\n r = session.get(URL, headers=HEADERS)\n if \"Zur aufgerufenen Seite\" in r.text:\n r = session.get(URL, headers=HEADERS)\n if r.status_code != 200:\n raise Exception(f\"Error: failed to fetch first url: {URL}\")\n\n # Get the IDs of the districts on the first level\n id = self.parse_level(r.text, 1)\n\n # Get the IDs of the districts on the second level\n url = (\n \"https://www.landkreis-harburg.de/ajax/abfall_gebiete_struktur_select.html\"\n )\n params = {\n \"parent\": id,\n \"ebene\": 1,\n \"portal\": 1,\n \"selected_ebene\": 0,\n }\n r = session.get(url, params=params, headers=HEADERS)\n if r.status_code != 200:\n raise Exception(f\"Error: failed to fetch second url: {url}\")\n\n # Get the IDs of the districts on the second level\n id = self.parse_level(r.text, 2)\n\n # Get the IDs of the third level - if applicable\n if self._districts[3 - 1] is not None:\n # Get the IDs of the districts on the third level\n params = {\n \"parent\": id,\n \"ebene\": 2,\n \"portal\": 1,\n \"selected_ebene\": 0,\n }\n r = session.get(url, params=params, headers=HEADERS)\n if r.status_code != 200:\n raise Exception(f\"Error: failed to fetch third url: {url}\")\n\n # Get the IDs of the districts on the third level\n id = self.parse_level(r.text, 3)\n\n # Prepare data for the real web request\n url = \"https://www.landkreis-harburg.de/abfallkalender/abfallkalender_struktur_daten_suche.html\"\n params = {\n \"selected_ebene\": id,\n \"owner\": 20100,\n }\n r = session.get(url, params=params, headers=HEADERS)\n\n # Sometimes there is no garbage calendar available\n if \"Es sind keine Abfuhrbezirke hinterlegt.\" in r.text:\n raise Exception(\n f'Error: \"Es sind keine Abfuhrbezirke hinterlegt.\" for \"{self._districts[3-1]}\". 
Please use different input data.'\n )\n\n soup = BeautifulSoup(r.text, features=\"html.parser\")\n links = soup.find_all(\"a\")\n ical_urls = []\n for any_link in links:\n if \" als iCal\" in any_link.text:\n # multiple links occur during year transition\n ical_urls.append(any_link.get(\"href\"))\n\n # Get the final data for all links\n entries = []\n for ical_url in ical_urls:\n r = requests.get(ical_url, headers=HEADERS)\n r.raise_for_status()\n\n # Parse ics file\n try:\n dates = self._ics.convert(r.text)\n\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n except ValueError:\n pass # during year transition the ical for the next year may be empty\n return entries\n\n def parse_level(self, response, level):\n soup = BeautifulSoup(response, features=\"html.parser\")\n select_content = soup.find_all(\"select\", id=f\"strukturEbene{level}\")\n soup = BeautifulSoup(str(select_content), features=\"html.parser\")\n options_content = soup.find_all(\"option\")\n level_ids = {}\n for option in options_content:\n # Ignore the \"Bitte w\u00e4hlen...\"\n if option.get(\"value\") != \"0\":\n level_ids[option.text] = option.get(\"value\")\n\n if level_ids == {}:\n raise Exception(f\"Error: Level {level} Dictionary empty\")\n\n if self._districts[level - 1] not in level_ids:\n raise Exception(\n f\"Error: District {self._districts[level]} is not in the dictionary: {level_ids}\"\n )\n\n return level_ids[self._districts[level - 1]]\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py"}]} | 2,331 | 439 |
gh_patches_debug_25116 | rasdani/github-patches | git_diff | lutris__lutris-2682 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Failure to read Steam's config.vdf due to wrong case
Lutris can't read Steam's config.vdf file because the "Steam" value is actually lowercase when Lutris expects it to be uppercase.

Same as #1966.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lutris/util/steam/config.py`
Content:
```
1 """Handle Steam configuration"""
2 import os
3 from collections import OrderedDict, defaultdict
4
5 from lutris.util import system
6 from lutris.util.log import logger
7 from lutris.util.steam.vdf import vdf_parse
8
9
10 def get_default_acf(appid, name):
11 """Return a default configuration usable to
12 create a runnable game in Steam"""
13
14 userconfig = OrderedDict()
15 userconfig["name"] = name
16 userconfig["gameid"] = appid
17
18 appstate = OrderedDict()
19 appstate["appID"] = appid
20 appstate["Universe"] = "1"
21 appstate["StateFlags"] = "1026"
22 appstate["installdir"] = name
23 appstate["UserConfig"] = userconfig
24 return {"AppState": appstate}
25
26
27 def read_config(steam_data_dir):
28 """Read the Steam configuration and return it as an object"""
29 config_filename = os.path.join(steam_data_dir, "config/config.vdf")
30 if not system.path_exists(config_filename):
31 return None
32 with open(config_filename, "r") as steam_config_file:
33 config = vdf_parse(steam_config_file, {})
34 try:
35 return config["InstallConfigStore"]["Software"]["Valve"]["Steam"]
36 except KeyError:
37 try:
38 return config["InstallConfigStore"]["Software"]["valve"]["Steam"]
39 except KeyError as ex:
40 logger.error("Steam config %s is empty: %s", config_filename, ex)
41
42
43 def get_steamapps_paths_for_platform(platform_name):
44 """
45 """
46 from lutris.runners import winesteam, steam
47
48 runners = {"linux": steam.steam, "windows": winesteam.winesteam}
49 runner = runners[platform_name]()
50 return runner.get_steamapps_dirs()
51
52
53 def get_steamapps_paths(flat=False, platform=None):
54 base_platforms = ["linux", "windows"]
55 if flat:
56 steamapps_paths = []
57 else:
58 steamapps_paths = defaultdict(list)
59
60 if platform:
61 if platform not in base_platforms:
62 raise ValueError("Illegal value for Steam platform: %s" % platform)
63 platforms = [platform]
64 else:
65 platforms = base_platforms
66
67 for _platform in platforms:
68 folders = get_steamapps_paths_for_platform(_platform)
69 if flat:
70 steamapps_paths += folders
71 else:
72 steamapps_paths[_platform] = folders
73
74 return steamapps_paths
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lutris/util/steam/config.py b/lutris/util/steam/config.py
--- a/lutris/util/steam/config.py
+++ b/lutris/util/steam/config.py
@@ -26,18 +26,25 @@
def read_config(steam_data_dir):
"""Read the Steam configuration and return it as an object"""
+
+ def get_entry_case_insensitive(config_dict, path):
+ for key, value in config_dict.items():
+ if key.lower() == path[0].lower():
+ if len(path) <= 1:
+ return config_dict[key]
+
+ return get_entry_case_insensitive(config_dict[key], path[1:])
+ raise KeyError(path[0])
+
config_filename = os.path.join(steam_data_dir, "config/config.vdf")
if not system.path_exists(config_filename):
return None
with open(config_filename, "r") as steam_config_file:
config = vdf_parse(steam_config_file, {})
try:
- return config["InstallConfigStore"]["Software"]["Valve"]["Steam"]
- except KeyError:
- try:
- return config["InstallConfigStore"]["Software"]["valve"]["Steam"]
- except KeyError as ex:
- logger.error("Steam config %s is empty: %s", config_filename, ex)
+ return get_entry_case_insensitive(config, ["InstallConfigStore", "Software", "Valve", "Steam"])
+ except KeyError as ex:
+ logger.error("Steam config %s is empty: %s", config_filename, ex)
def get_steamapps_paths_for_platform(platform_name):
| {"golden_diff": "diff --git a/lutris/util/steam/config.py b/lutris/util/steam/config.py\n--- a/lutris/util/steam/config.py\n+++ b/lutris/util/steam/config.py\n@@ -26,18 +26,25 @@\n \n def read_config(steam_data_dir):\n \"\"\"Read the Steam configuration and return it as an object\"\"\"\n+\n+ def get_entry_case_insensitive(config_dict, path):\n+ for key, value in config_dict.items():\n+ if key.lower() == path[0].lower():\n+ if len(path) <= 1:\n+ return config_dict[key]\n+\n+ return get_entry_case_insensitive(config_dict[key], path[1:])\n+ raise KeyError(path[0])\n+\n config_filename = os.path.join(steam_data_dir, \"config/config.vdf\")\n if not system.path_exists(config_filename):\n return None\n with open(config_filename, \"r\") as steam_config_file:\n config = vdf_parse(steam_config_file, {})\n try:\n- return config[\"InstallConfigStore\"][\"Software\"][\"Valve\"][\"Steam\"]\n- except KeyError:\n- try:\n- return config[\"InstallConfigStore\"][\"Software\"][\"valve\"][\"Steam\"]\n- except KeyError as ex:\n- logger.error(\"Steam config %s is empty: %s\", config_filename, ex)\n+ return get_entry_case_insensitive(config, [\"InstallConfigStore\", \"Software\", \"Valve\", \"Steam\"])\n+ except KeyError as ex:\n+ logger.error(\"Steam config %s is empty: %s\", config_filename, ex)\n \n \n def get_steamapps_paths_for_platform(platform_name):\n", "issue": "Failure to read Steam's config.vdf due to wrong case\nLutris can't read Steam's config.vdf file because the \"Steam\" value is actually lowercase when Lutris expects it to be uppercase.\r\n\r\n\r\n\r\nSame as #1966.\nFailure to read Steam's config.vdf due to wrong case\nLutris can't read Steam's config.vdf file because the \"Steam\" value is actually lowercase when Lutris expects it to be uppercase.\r\n\r\n\r\n\r\nSame as #1966.\n", "before_files": [{"content": "\"\"\"Handle Steam configuration\"\"\"\nimport os\nfrom collections import OrderedDict, defaultdict\n\nfrom lutris.util import system\nfrom lutris.util.log import logger\nfrom lutris.util.steam.vdf import vdf_parse\n\n\ndef get_default_acf(appid, name):\n \"\"\"Return a default configuration usable to\n create a runnable game in Steam\"\"\"\n\n userconfig = OrderedDict()\n userconfig[\"name\"] = name\n userconfig[\"gameid\"] = appid\n\n appstate = OrderedDict()\n appstate[\"appID\"] = appid\n appstate[\"Universe\"] = \"1\"\n appstate[\"StateFlags\"] = \"1026\"\n appstate[\"installdir\"] = name\n appstate[\"UserConfig\"] = userconfig\n return {\"AppState\": appstate}\n\n\ndef read_config(steam_data_dir):\n \"\"\"Read the Steam configuration and return it as an object\"\"\"\n config_filename = os.path.join(steam_data_dir, \"config/config.vdf\")\n if not system.path_exists(config_filename):\n return None\n with open(config_filename, \"r\") as steam_config_file:\n config = vdf_parse(steam_config_file, {})\n try:\n return config[\"InstallConfigStore\"][\"Software\"][\"Valve\"][\"Steam\"]\n except KeyError:\n try:\n return config[\"InstallConfigStore\"][\"Software\"][\"valve\"][\"Steam\"]\n except KeyError as ex:\n logger.error(\"Steam config %s is empty: %s\", config_filename, ex)\n\n\ndef get_steamapps_paths_for_platform(platform_name):\n \"\"\"\n \"\"\"\n from lutris.runners import winesteam, steam\n\n runners = {\"linux\": steam.steam, \"windows\": winesteam.winesteam}\n runner = runners[platform_name]()\n return runner.get_steamapps_dirs()\n\n\ndef get_steamapps_paths(flat=False, platform=None):\n base_platforms = [\"linux\", \"windows\"]\n if flat:\n steamapps_paths = []\n else:\n 
steamapps_paths = defaultdict(list)\n\n if platform:\n if platform not in base_platforms:\n raise ValueError(\"Illegal value for Steam platform: %s\" % platform)\n platforms = [platform]\n else:\n platforms = base_platforms\n\n for _platform in platforms:\n folders = get_steamapps_paths_for_platform(_platform)\n if flat:\n steamapps_paths += folders\n else:\n steamapps_paths[_platform] = folders\n\n return steamapps_paths\n", "path": "lutris/util/steam/config.py"}], "after_files": [{"content": "\"\"\"Handle Steam configuration\"\"\"\nimport os\nfrom collections import OrderedDict, defaultdict\n\nfrom lutris.util import system\nfrom lutris.util.log import logger\nfrom lutris.util.steam.vdf import vdf_parse\n\n\ndef get_default_acf(appid, name):\n \"\"\"Return a default configuration usable to\n create a runnable game in Steam\"\"\"\n\n userconfig = OrderedDict()\n userconfig[\"name\"] = name\n userconfig[\"gameid\"] = appid\n\n appstate = OrderedDict()\n appstate[\"appID\"] = appid\n appstate[\"Universe\"] = \"1\"\n appstate[\"StateFlags\"] = \"1026\"\n appstate[\"installdir\"] = name\n appstate[\"UserConfig\"] = userconfig\n return {\"AppState\": appstate}\n\n\ndef read_config(steam_data_dir):\n \"\"\"Read the Steam configuration and return it as an object\"\"\"\n\n def get_entry_case_insensitive(config_dict, path):\n for key, value in config_dict.items():\n if key.lower() == path[0].lower():\n if len(path) <= 1:\n return config_dict[key]\n\n return get_entry_case_insensitive(config_dict[key], path[1:])\n raise KeyError(path[0])\n\n config_filename = os.path.join(steam_data_dir, \"config/config.vdf\")\n if not system.path_exists(config_filename):\n return None\n with open(config_filename, \"r\") as steam_config_file:\n config = vdf_parse(steam_config_file, {})\n try:\n return get_entry_case_insensitive(config, [\"InstallConfigStore\", \"Software\", \"Valve\", \"Steam\"])\n except KeyError as ex:\n logger.error(\"Steam config %s is empty: %s\", config_filename, ex)\n\n\ndef get_steamapps_paths_for_platform(platform_name):\n \"\"\"\n \"\"\"\n from lutris.runners import winesteam, steam\n\n runners = {\"linux\": steam.steam, \"windows\": winesteam.winesteam}\n runner = runners[platform_name]()\n return runner.get_steamapps_dirs()\n\n\ndef get_steamapps_paths(flat=False, platform=None):\n base_platforms = [\"linux\", \"windows\"]\n if flat:\n steamapps_paths = []\n else:\n steamapps_paths = defaultdict(list)\n\n if platform:\n if platform not in base_platforms:\n raise ValueError(\"Illegal value for Steam platform: %s\" % platform)\n platforms = [platform]\n else:\n platforms = base_platforms\n\n for _platform in platforms:\n folders = get_steamapps_paths_for_platform(_platform)\n if flat:\n steamapps_paths += folders\n else:\n steamapps_paths[_platform] = folders\n\n return steamapps_paths\n", "path": "lutris/util/steam/config.py"}]} | 1,171 | 353 |
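For reference, a minimal standalone sketch of the case-insensitive lookup used in the patch above; the sample dictionary is hypothetical and stands in for a parsed config.vdf:

```python
def get_entry_case_insensitive(config_dict, path):
    """Walk a nested dict, matching each key in `path` without regard to case."""
    for key in config_dict:
        if key.lower() == path[0].lower():
            if len(path) <= 1:
                return config_dict[key]
            return get_entry_case_insensitive(config_dict[key], path[1:])
    raise KeyError(path[0])


# Hypothetical parsed VDF where Steam writes the section names in lowercase.
sample = {
    "InstallConfigStore": {
        "Software": {"valve": {"steam": {"BaseInstallFolder_1": "/games/steam"}}}
    }
}
print(get_entry_case_insensitive(sample, ["InstallConfigStore", "Software", "Valve", "Steam"]))
# -> {'BaseInstallFolder_1': '/games/steam'}
```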
gh_patches_debug_29875 | rasdani/github-patches | git_diff | streamlink__streamlink-1727 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Request: Add more functions to facebook plugin
### Checklist
- [x] This is a bug report.
- [ ] This is a feature request.
- [x] This is a plugin (improvement) request.
- [ ] I have read the contribution guidelines.
### Description
Reminder that with the new initial support of MPEG-DASH, #880 and #990 might be fixable now, depending on what streamlink supports and how Facebook's videos and livestreaming have changed since this was last looked at.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/facebook.py`
Content:
```
1 import re
2
3 from streamlink.plugin import Plugin
4 from streamlink.stream import HLSStream
5
6 _playlist_url = "https://www.facebook.com/video/playback/playlist.m3u8?v={0}"
7
8 _url_re = re.compile(r"http(s)?://(www\.)?facebook\.com/[^/]+/videos/(?P<video_id>\d+)")
9
10
11 class Facebook(Plugin):
12 @classmethod
13 def can_handle_url(cls, url):
14 return _url_re.match(url)
15
16 @Plugin.broken(990)
17 def _get_streams(self):
18 match = _url_re.match(self.url)
19 video = match.group("video_id")
20
21 playlist = _playlist_url.format(video)
22
23 return HLSStream.parse_variant_playlist(self.session, playlist)
24
25
26 __plugin__ = Facebook
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/facebook.py b/src/streamlink/plugins/facebook.py
--- a/src/streamlink/plugins/facebook.py
+++ b/src/streamlink/plugins/facebook.py
@@ -1,26 +1,42 @@
import re
from streamlink.plugin import Plugin
-from streamlink.stream import HLSStream
-
-_playlist_url = "https://www.facebook.com/video/playback/playlist.m3u8?v={0}"
-
-_url_re = re.compile(r"http(s)?://(www\.)?facebook\.com/[^/]+/videos/(?P<video_id>\d+)")
+from streamlink.plugin.api import http, useragents
+from streamlink.stream import DASHStream, HTTPStream
+from streamlink.utils import parse_json
class Facebook(Plugin):
+ _url_re = re.compile(r"https?://(?:www\.)?facebook\.com/[^/]+/videos")
+ _mpd_re = re.compile(r'''(sd|hd)_src["']?\s*:\s*(?P<quote>["'])(?P<url>.+?)(?P=quote)''')
+ _playlist_re = re.compile(r'''video:\[({url:".+?}\])''')
+ _plurl_re = re.compile(r'''url:"(.*?)"''')
+
@classmethod
def can_handle_url(cls, url):
- return _url_re.match(url)
+ return cls._url_re.match(url)
- @Plugin.broken(990)
def _get_streams(self):
- match = _url_re.match(self.url)
- video = match.group("video_id")
+ res = http.get(self.url, headers={"User-Agent": useragents.CHROME})
+ with open("temp.html", "w") as f:
+ f.write(res.text)
+
+ for match in self._mpd_re.finditer(res.text):
+ manifest_url = match.group("url")
+ if "\\/" in manifest_url:
+ # if the URL is json encoded, decode it
+ manifest_url = parse_json("\"{}\"".format(manifest_url))
+ for s in DASHStream.parse_manifest(self.session, manifest_url).items():
+ yield s
+ else:
+ match = self._playlist_re.search(res.text)
+ playlist = match and match.group(1)
+ if playlist:
+ for url in {url.group(1) for url in self._plurl_re.finditer(playlist)}:
+ yield "live", HTTPStream(self.session, url)
+
- playlist = _playlist_url.format(video)
- return HLSStream.parse_variant_playlist(self.session, playlist)
__plugin__ = Facebook
| {"golden_diff": "diff --git a/src/streamlink/plugins/facebook.py b/src/streamlink/plugins/facebook.py\n--- a/src/streamlink/plugins/facebook.py\n+++ b/src/streamlink/plugins/facebook.py\n@@ -1,26 +1,42 @@\n import re\n \n from streamlink.plugin import Plugin\n-from streamlink.stream import HLSStream\n-\n-_playlist_url = \"https://www.facebook.com/video/playback/playlist.m3u8?v={0}\"\n-\n-_url_re = re.compile(r\"http(s)?://(www\\.)?facebook\\.com/[^/]+/videos/(?P<video_id>\\d+)\")\n+from streamlink.plugin.api import http, useragents\n+from streamlink.stream import DASHStream, HTTPStream\n+from streamlink.utils import parse_json\n \n \n class Facebook(Plugin):\n+ _url_re = re.compile(r\"https?://(?:www\\.)?facebook\\.com/[^/]+/videos\")\n+ _mpd_re = re.compile(r'''(sd|hd)_src[\"']?\\s*:\\s*(?P<quote>[\"'])(?P<url>.+?)(?P=quote)''')\n+ _playlist_re = re.compile(r'''video:\\[({url:\".+?}\\])''')\n+ _plurl_re = re.compile(r'''url:\"(.*?)\"''')\n+\n @classmethod\n def can_handle_url(cls, url):\n- return _url_re.match(url)\n+ return cls._url_re.match(url)\n \n- @Plugin.broken(990)\n def _get_streams(self):\n- match = _url_re.match(self.url)\n- video = match.group(\"video_id\")\n+ res = http.get(self.url, headers={\"User-Agent\": useragents.CHROME})\n+ with open(\"temp.html\", \"w\") as f:\n+ f.write(res.text)\n+\n+ for match in self._mpd_re.finditer(res.text):\n+ manifest_url = match.group(\"url\")\n+ if \"\\\\/\" in manifest_url:\n+ # if the URL is json encoded, decode it\n+ manifest_url = parse_json(\"\\\"{}\\\"\".format(manifest_url))\n+ for s in DASHStream.parse_manifest(self.session, manifest_url).items():\n+ yield s\n+ else:\n+ match = self._playlist_re.search(res.text)\n+ playlist = match and match.group(1)\n+ if playlist:\n+ for url in {url.group(1) for url in self._plurl_re.finditer(playlist)}:\n+ yield \"live\", HTTPStream(self.session, url)\n+\n \n- playlist = _playlist_url.format(video)\n \n- return HLSStream.parse_variant_playlist(self.session, playlist)\n \n \n __plugin__ = Facebook\n", "issue": "Request: Add more functions to facebook plugin\n### Checklist\r\n\r\n- [x] This is a bug report.\r\n- [ ] This is a feature request.\r\n- [x] This is a plugin (improvement) request.\r\n- [ ] I have read the contribution guidelines.\r\n\r\n### Description\r\nReminder that with the new initial support of Mpeg Dash #880 and #990 might be fixable now, depending on what streamlink supports and how Facebook's videos and livestreaming has changed since this was last looked it.\r\n\n", "before_files": [{"content": "import re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.stream import HLSStream\n\n_playlist_url = \"https://www.facebook.com/video/playback/playlist.m3u8?v={0}\"\n\n_url_re = re.compile(r\"http(s)?://(www\\.)?facebook\\.com/[^/]+/videos/(?P<video_id>\\d+)\")\n\n\nclass Facebook(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n @Plugin.broken(990)\n def _get_streams(self):\n match = _url_re.match(self.url)\n video = match.group(\"video_id\")\n\n playlist = _playlist_url.format(video)\n\n return HLSStream.parse_variant_playlist(self.session, playlist)\n\n\n__plugin__ = Facebook\n", "path": "src/streamlink/plugins/facebook.py"}], "after_files": [{"content": "import re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http, useragents\nfrom streamlink.stream import DASHStream, HTTPStream\nfrom streamlink.utils import parse_json\n\n\nclass Facebook(Plugin):\n _url_re = 
re.compile(r\"https?://(?:www\\.)?facebook\\.com/[^/]+/videos\")\n _mpd_re = re.compile(r'''(sd|hd)_src[\"']?\\s*:\\s*(?P<quote>[\"'])(?P<url>.+?)(?P=quote)''')\n _playlist_re = re.compile(r'''video:\\[({url:\".+?}\\])''')\n _plurl_re = re.compile(r'''url:\"(.*?)\"''')\n\n @classmethod\n def can_handle_url(cls, url):\n return cls._url_re.match(url)\n\n def _get_streams(self):\n res = http.get(self.url, headers={\"User-Agent\": useragents.CHROME})\n with open(\"temp.html\", \"w\") as f:\n f.write(res.text)\n\n for match in self._mpd_re.finditer(res.text):\n manifest_url = match.group(\"url\")\n if \"\\\\/\" in manifest_url:\n # if the URL is json encoded, decode it\n manifest_url = parse_json(\"\\\"{}\\\"\".format(manifest_url))\n for s in DASHStream.parse_manifest(self.session, manifest_url).items():\n yield s\n else:\n match = self._playlist_re.search(res.text)\n playlist = match and match.group(1)\n if playlist:\n for url in {url.group(1) for url in self._plurl_re.finditer(playlist)}:\n yield \"live\", HTTPStream(self.session, url)\n\n\n\n\n\n__plugin__ = Facebook\n", "path": "src/streamlink/plugins/facebook.py"}]} | 590 | 584 |
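For reference, a minimal standalone sketch of the manifest-URL extraction step used in the patch above, built only on the standard library; the page fragment is hypothetical:

```python
import json
import re

# Hypothetical fragment of a Facebook video page embedding escaped manifest URLs.
page = 'hd_src:"https:\\/\\/video.example\\/hd.mpd",sd_src:"https:\\/\\/video.example\\/sd.mpd"'

mpd_re = re.compile(r'''(sd|hd)_src["']?\s*:\s*(?P<quote>["'])(?P<url>.+?)(?P=quote)''')

for match in mpd_re.finditer(page):
    url = match.group("url")
    if "\\/" in url:
        # The URL is JSON-escaped; decode it the same way the plugin's parse_json call does.
        url = json.loads('"{}"'.format(url))
    print(url)
# -> https://video.example/hd.mpd
# -> https://video.example/sd.mpd
```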
gh_patches_debug_32233 | rasdani/github-patches | git_diff | ray-project__ray-5117 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[tune] Missing keep_checkpoints_num arg in tune
Hi everybody,
Thanks a lot for this lib, I've been using it for the past few weeks with TF2 and it works pretty well!
In my current use case, I'm checkpointing regularly to ensure minimal loss in the face of OOM. I control TF checkpoints manually to avoid filling my disk space, but I still need to dump some ray metadata (iteration count, etc.).
I've noticed that ray creates folders like `checkpoint_{iter}` to store those, but from `tune` there is no way to tell it that I don't need to keep dozens of folders.
I also noticed that the `experiment` and `trial` classes have the perfect argument: `keep_checkpoints_num`.
Sadly, this argument is missing from the `tune` config.
Is there any reason why it is missing from the `tune` arguments?
Have a nice day.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/ray/tune/tune.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4
5 import logging
6 import time
7
8 from ray.tune.error import TuneError
9 from ray.tune.experiment import convert_to_experiment_list, Experiment
10 from ray.tune.analysis import ExperimentAnalysis
11 from ray.tune.suggest import BasicVariantGenerator
12 from ray.tune.trial import Trial, DEBUG_PRINT_INTERVAL
13 from ray.tune.ray_trial_executor import RayTrialExecutor
14 from ray.tune.syncer import wait_for_sync
15 from ray.tune.trial_runner import TrialRunner
16 from ray.tune.schedulers import (HyperBandScheduler, AsyncHyperBandScheduler,
17 FIFOScheduler, MedianStoppingRule)
18 from ray.tune.web_server import TuneServer
19
20 logger = logging.getLogger(__name__)
21
22 _SCHEDULERS = {
23 "FIFO": FIFOScheduler,
24 "MedianStopping": MedianStoppingRule,
25 "HyperBand": HyperBandScheduler,
26 "AsyncHyperBand": AsyncHyperBandScheduler,
27 }
28
29
30 def _make_scheduler(args):
31 if args.scheduler in _SCHEDULERS:
32 return _SCHEDULERS[args.scheduler](**args.scheduler_config)
33 else:
34 raise TuneError("Unknown scheduler: {}, should be one of {}".format(
35 args.scheduler, _SCHEDULERS.keys()))
36
37
38 def run(run_or_experiment,
39 name=None,
40 stop=None,
41 config=None,
42 resources_per_trial=None,
43 num_samples=1,
44 local_dir=None,
45 upload_dir=None,
46 trial_name_creator=None,
47 loggers=None,
48 sync_to_cloud=None,
49 sync_to_driver=None,
50 checkpoint_freq=0,
51 checkpoint_at_end=False,
52 global_checkpoint_period=10,
53 export_formats=None,
54 max_failures=3,
55 restore=None,
56 search_alg=None,
57 scheduler=None,
58 with_server=False,
59 server_port=TuneServer.DEFAULT_PORT,
60 verbose=2,
61 resume=False,
62 queue_trials=False,
63 reuse_actors=True,
64 trial_executor=None,
65 raise_on_failed_trial=True,
66 return_trials=True,
67 ray_auto_init=True,
68 sync_function=None):
69 """Executes training.
70
71 Args:
72 run_or_experiment (function|class|str|Experiment): If
73 function|class|str, this is the algorithm or model to train.
74 This may refer to the name of a built-on algorithm
75 (e.g. RLLib's DQN or PPO), a user-defined trainable
76 function or class, or the string identifier of a
77 trainable function or class registered in the tune registry.
78 If Experiment, then Tune will execute training based on
79 Experiment.spec.
80 name (str): Name of experiment.
81 stop (dict): The stopping criteria. The keys may be any field in
82 the return result of 'train()', whichever is reached first.
83 Defaults to empty dict.
84 config (dict): Algorithm-specific configuration for Tune variant
85 generation (e.g. env, hyperparams). Defaults to empty dict.
86 Custom search algorithms may ignore this.
87 resources_per_trial (dict): Machine resources to allocate per trial,
88 e.g. ``{"cpu": 64, "gpu": 8}``. Note that GPUs will not be
89 assigned unless you specify them here. Defaults to 1 CPU and 0
90 GPUs in ``Trainable.default_resource_request()``.
91 num_samples (int): Number of times to sample from the
92 hyperparameter space. Defaults to 1. If `grid_search` is
93 provided as an argument, the grid will be repeated
94 `num_samples` of times.
95 local_dir (str): Local dir to save training results to.
96 Defaults to ``~/ray_results``.
97 upload_dir (str): Optional URI to sync training results
98 to (e.g. ``s3://bucket``).
99 trial_name_creator (func): Optional function for generating
100 the trial string representation.
101 loggers (list): List of logger creators to be used with
102 each Trial. If None, defaults to ray.tune.logger.DEFAULT_LOGGERS.
103 See `ray/tune/logger.py`.
104 sync_to_cloud (func|str): Function for syncing the local_dir to and
105 from upload_dir. If string, then it must be a string template
106 that includes `{source}` and `{target}` for the syncer to run.
107 If not provided, the sync command defaults to standard
108 S3 or gsutil sync comamnds.
109 sync_to_driver (func|str): Function for syncing trial logdir from
110 remote node to local. If string, then it must be a string template
111 that includes `{source}` and `{target}` for the syncer to run.
112 If not provided, defaults to using rsync.
113 checkpoint_freq (int): How many training iterations between
114 checkpoints. A value of 0 (default) disables checkpointing.
115 checkpoint_at_end (bool): Whether to checkpoint at the end of the
116 experiment regardless of the checkpoint_freq. Default is False.
117 global_checkpoint_period (int): Seconds between global checkpointing.
118 This does not affect `checkpoint_freq`, which specifies frequency
119 for individual trials.
120 export_formats (list): List of formats that exported at the end of
121 the experiment. Default is None.
122 max_failures (int): Try to recover a trial from its last
123 checkpoint at least this many times. Only applies if
124 checkpointing is enabled. Setting to -1 will lead to infinite
125 recovery retries. Defaults to 3.
126 restore (str): Path to checkpoint. Only makes sense to set if
127 running 1 trial. Defaults to None.
128 search_alg (SearchAlgorithm): Search Algorithm. Defaults to
129 BasicVariantGenerator.
130 scheduler (TrialScheduler): Scheduler for executing
131 the experiment. Choose among FIFO (default), MedianStopping,
132 AsyncHyperBand, and HyperBand.
133 with_server (bool): Starts a background Tune server. Needed for
134 using the Client API.
135 server_port (int): Port number for launching TuneServer.
136 verbose (int): 0, 1, or 2. Verbosity mode. 0 = silent,
137 1 = only status updates, 2 = status and trial results.
138 resume (str|bool): One of "LOCAL", "REMOTE", "PROMPT", or bool.
139 LOCAL/True restores the checkpoint from the local_checkpoint_dir.
140 REMOTE restores the checkpoint from remote_checkpoint_dir.
141 PROMPT provides CLI feedback. False forces a new
142 experiment. If resume is set but checkpoint does not exist,
143 ValueError will be thrown.
144 queue_trials (bool): Whether to queue trials when the cluster does
145 not currently have enough resources to launch one. This should
146 be set to True when running on an autoscaling cluster to enable
147 automatic scale-up.
148 reuse_actors (bool): Whether to reuse actors between different trials
149 when possible. This can drastically speed up experiments that start
150 and stop actors often (e.g., PBT in time-multiplexing mode). This
151 requires trials to have the same resource requirements.
152 trial_executor (TrialExecutor): Manage the execution of trials.
153 raise_on_failed_trial (bool): Raise TuneError if there exists failed
154 trial (of ERROR state) when the experiments complete.
155 ray_auto_init (bool): Automatically starts a local Ray cluster
156 if using a RayTrialExecutor (which is the default) and
157 if Ray is not initialized. Defaults to True.
158 sync_function: Deprecated. See `sync_to_cloud` and
159 `sync_to_driver`.
160
161 Returns:
162 List of Trial objects.
163
164 Raises:
165 TuneError if any trials failed and `raise_on_failed_trial` is True.
166
167 Examples:
168 >>> tune.run(mytrainable, scheduler=PopulationBasedTraining())
169
170 >>> tune.run(mytrainable, num_samples=5, reuse_actors=True)
171
172 >>> tune.run(
173 "PG",
174 num_samples=5,
175 config={
176 "env": "CartPole-v0",
177 "lr": tune.sample_from(lambda _: np.random.rand())
178 }
179 )
180 """
181 trial_executor = trial_executor or RayTrialExecutor(
182 queue_trials=queue_trials,
183 reuse_actors=reuse_actors,
184 ray_auto_init=ray_auto_init)
185 experiment = run_or_experiment
186 if not isinstance(run_or_experiment, Experiment):
187 run_identifier = Experiment._register_if_needed(run_or_experiment)
188 experiment = Experiment(
189 name=name,
190 run=run_identifier,
191 stop=stop,
192 config=config,
193 resources_per_trial=resources_per_trial,
194 num_samples=num_samples,
195 local_dir=local_dir,
196 upload_dir=upload_dir,
197 sync_to_driver=sync_to_driver,
198 trial_name_creator=trial_name_creator,
199 loggers=loggers,
200 checkpoint_freq=checkpoint_freq,
201 checkpoint_at_end=checkpoint_at_end,
202 export_formats=export_formats,
203 max_failures=max_failures,
204 restore=restore,
205 sync_function=sync_function)
206 else:
207 logger.debug("Ignoring some parameters passed into tune.run.")
208
209 if sync_to_cloud:
210 assert experiment.remote_checkpoint_dir, (
211 "Need `upload_dir` if `sync_to_cloud` given.")
212
213 runner = TrialRunner(
214 search_alg=search_alg or BasicVariantGenerator(),
215 scheduler=scheduler or FIFOScheduler(),
216 local_checkpoint_dir=experiment.checkpoint_dir,
217 remote_checkpoint_dir=experiment.remote_checkpoint_dir,
218 sync_to_cloud=sync_to_cloud,
219 checkpoint_period=global_checkpoint_period,
220 resume=resume,
221 launch_web_server=with_server,
222 server_port=server_port,
223 verbose=bool(verbose > 1),
224 trial_executor=trial_executor)
225
226 runner.add_experiment(experiment)
227
228 if verbose:
229 print(runner.debug_string(max_debug=99999))
230
231 last_debug = 0
232 while not runner.is_finished():
233 runner.step()
234 if time.time() - last_debug > DEBUG_PRINT_INTERVAL:
235 if verbose:
236 print(runner.debug_string())
237 last_debug = time.time()
238
239 if verbose:
240 print(runner.debug_string(max_debug=99999))
241
242 wait_for_sync()
243
244 errored_trials = []
245 for trial in runner.get_trials():
246 if trial.status != Trial.TERMINATED:
247 errored_trials += [trial]
248
249 if errored_trials:
250 if raise_on_failed_trial:
251 raise TuneError("Trials did not complete", errored_trials)
252 else:
253 logger.error("Trials did not complete: %s", errored_trials)
254
255 if return_trials:
256 return runner.get_trials()
257 return ExperimentAnalysis(experiment.checkpoint_dir)
258
259
260 def run_experiments(experiments,
261 search_alg=None,
262 scheduler=None,
263 with_server=False,
264 server_port=TuneServer.DEFAULT_PORT,
265 verbose=2,
266 resume=False,
267 queue_trials=False,
268 reuse_actors=False,
269 trial_executor=None,
270 raise_on_failed_trial=True):
271 """Runs and blocks until all trials finish.
272
273 Examples:
274 >>> experiment_spec = Experiment("experiment", my_func)
275 >>> run_experiments(experiments=experiment_spec)
276
277 >>> experiment_spec = {"experiment": {"run": my_func}}
278 >>> run_experiments(experiments=experiment_spec)
279
280 >>> run_experiments(
281 >>> experiments=experiment_spec,
282 >>> scheduler=MedianStoppingRule(...))
283
284 >>> run_experiments(
285 >>> experiments=experiment_spec,
286 >>> search_alg=SearchAlgorithm(),
287 >>> scheduler=MedianStoppingRule(...))
288
289 Returns:
290 List of Trial objects, holding data for each executed trial.
291
292 """
293 # This is important to do this here
294 # because it schematize the experiments
295 # and it conducts the implicit registration.
296 experiments = convert_to_experiment_list(experiments)
297
298 trials = []
299 for exp in experiments:
300 trials += run(
301 exp,
302 search_alg=search_alg,
303 scheduler=scheduler,
304 with_server=with_server,
305 server_port=server_port,
306 verbose=verbose,
307 resume=resume,
308 queue_trials=queue_trials,
309 reuse_actors=reuse_actors,
310 trial_executor=trial_executor,
311 raise_on_failed_trial=raise_on_failed_trial)
312 return trials
313
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/ray/tune/tune.py b/python/ray/tune/tune.py
--- a/python/ray/tune/tune.py
+++ b/python/ray/tune/tune.py
@@ -49,6 +49,8 @@
sync_to_driver=None,
checkpoint_freq=0,
checkpoint_at_end=False,
+ keep_checkpoints_num=None,
+ checkpoint_score_attr=None,
global_checkpoint_period=10,
export_formats=None,
max_failures=3,
@@ -114,6 +116,13 @@
checkpoints. A value of 0 (default) disables checkpointing.
checkpoint_at_end (bool): Whether to checkpoint at the end of the
experiment regardless of the checkpoint_freq. Default is False.
+ keep_checkpoints_num (int): Number of checkpoints to keep. A value of
+ `None` keeps all checkpoints. Defaults to `None`. If set, need
+ to provide `checkpoint_score_attr`.
+ checkpoint_score_attr (str): Specifies by which attribute to rank the
+ best checkpoint. Default is increasing order. If attribute starts
+ with `min-` it will rank attribute in decreasing order, i.e.
+ `min-validation_loss`.
global_checkpoint_period (int): Seconds between global checkpointing.
This does not affect `checkpoint_freq`, which specifies frequency
for individual trials.
@@ -199,6 +208,8 @@
loggers=loggers,
checkpoint_freq=checkpoint_freq,
checkpoint_at_end=checkpoint_at_end,
+ keep_checkpoints_num=keep_checkpoints_num,
+ checkpoint_score_attr=checkpoint_score_attr,
export_formats=export_formats,
max_failures=max_failures,
restore=restore,
| {"golden_diff": "diff --git a/python/ray/tune/tune.py b/python/ray/tune/tune.py\n--- a/python/ray/tune/tune.py\n+++ b/python/ray/tune/tune.py\n@@ -49,6 +49,8 @@\n sync_to_driver=None,\n checkpoint_freq=0,\n checkpoint_at_end=False,\n+ keep_checkpoints_num=None,\n+ checkpoint_score_attr=None,\n global_checkpoint_period=10,\n export_formats=None,\n max_failures=3,\n@@ -114,6 +116,13 @@\n checkpoints. A value of 0 (default) disables checkpointing.\n checkpoint_at_end (bool): Whether to checkpoint at the end of the\n experiment regardless of the checkpoint_freq. Default is False.\n+ keep_checkpoints_num (int): Number of checkpoints to keep. A value of\n+ `None` keeps all checkpoints. Defaults to `None`. If set, need\n+ to provide `checkpoint_score_attr`.\n+ checkpoint_score_attr (str): Specifies by which attribute to rank the\n+ best checkpoint. Default is increasing order. If attribute starts\n+ with `min-` it will rank attribute in decreasing order, i.e.\n+ `min-validation_loss`.\n global_checkpoint_period (int): Seconds between global checkpointing.\n This does not affect `checkpoint_freq`, which specifies frequency\n for individual trials.\n@@ -199,6 +208,8 @@\n loggers=loggers,\n checkpoint_freq=checkpoint_freq,\n checkpoint_at_end=checkpoint_at_end,\n+ keep_checkpoints_num=keep_checkpoints_num,\n+ checkpoint_score_attr=checkpoint_score_attr,\n export_formats=export_formats,\n max_failures=max_failures,\n restore=restore,\n", "issue": "[tune] Missing keep_checkpoints_num arg in tune\nHi everybody,\r\n\r\nThanks a lot for this lib, I've been using for the past few weeks with TF2 and it works pretty well!\r\n\r\nIn my current use case, I'm checkpointing regularly to ensure a minimal loss in the face of OOM. I do control TF checkpoints manually to avoid filling my disk space but I still need to dump some ray metadata (iterations count, etc.).\r\nI've noticed that ray create folders like `checkpoint_{iter}` to store those but from `tune` there is no way to tell him that I don't need to keep dozens of folder.\r\nI also noticed that `experiment`, and `trial` classes have the perfect argument: `keep_checkpoints_num`.\r\n\r\nSadly this argument is missing from the `tune` config. 
\r\nIs there any reasons why it is missing from `tune` arguments?\r\n\r\nHave a nice day.\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport time\n\nfrom ray.tune.error import TuneError\nfrom ray.tune.experiment import convert_to_experiment_list, Experiment\nfrom ray.tune.analysis import ExperimentAnalysis\nfrom ray.tune.suggest import BasicVariantGenerator\nfrom ray.tune.trial import Trial, DEBUG_PRINT_INTERVAL\nfrom ray.tune.ray_trial_executor import RayTrialExecutor\nfrom ray.tune.syncer import wait_for_sync\nfrom ray.tune.trial_runner import TrialRunner\nfrom ray.tune.schedulers import (HyperBandScheduler, AsyncHyperBandScheduler,\n FIFOScheduler, MedianStoppingRule)\nfrom ray.tune.web_server import TuneServer\n\nlogger = logging.getLogger(__name__)\n\n_SCHEDULERS = {\n \"FIFO\": FIFOScheduler,\n \"MedianStopping\": MedianStoppingRule,\n \"HyperBand\": HyperBandScheduler,\n \"AsyncHyperBand\": AsyncHyperBandScheduler,\n}\n\n\ndef _make_scheduler(args):\n if args.scheduler in _SCHEDULERS:\n return _SCHEDULERS[args.scheduler](**args.scheduler_config)\n else:\n raise TuneError(\"Unknown scheduler: {}, should be one of {}\".format(\n args.scheduler, _SCHEDULERS.keys()))\n\n\ndef run(run_or_experiment,\n name=None,\n stop=None,\n config=None,\n resources_per_trial=None,\n num_samples=1,\n local_dir=None,\n upload_dir=None,\n trial_name_creator=None,\n loggers=None,\n sync_to_cloud=None,\n sync_to_driver=None,\n checkpoint_freq=0,\n checkpoint_at_end=False,\n global_checkpoint_period=10,\n export_formats=None,\n max_failures=3,\n restore=None,\n search_alg=None,\n scheduler=None,\n with_server=False,\n server_port=TuneServer.DEFAULT_PORT,\n verbose=2,\n resume=False,\n queue_trials=False,\n reuse_actors=True,\n trial_executor=None,\n raise_on_failed_trial=True,\n return_trials=True,\n ray_auto_init=True,\n sync_function=None):\n \"\"\"Executes training.\n\n Args:\n run_or_experiment (function|class|str|Experiment): If\n function|class|str, this is the algorithm or model to train.\n This may refer to the name of a built-on algorithm\n (e.g. RLLib's DQN or PPO), a user-defined trainable\n function or class, or the string identifier of a\n trainable function or class registered in the tune registry.\n If Experiment, then Tune will execute training based on\n Experiment.spec.\n name (str): Name of experiment.\n stop (dict): The stopping criteria. The keys may be any field in\n the return result of 'train()', whichever is reached first.\n Defaults to empty dict.\n config (dict): Algorithm-specific configuration for Tune variant\n generation (e.g. env, hyperparams). Defaults to empty dict.\n Custom search algorithms may ignore this.\n resources_per_trial (dict): Machine resources to allocate per trial,\n e.g. ``{\"cpu\": 64, \"gpu\": 8}``. Note that GPUs will not be\n assigned unless you specify them here. Defaults to 1 CPU and 0\n GPUs in ``Trainable.default_resource_request()``.\n num_samples (int): Number of times to sample from the\n hyperparameter space. Defaults to 1. If `grid_search` is\n provided as an argument, the grid will be repeated\n `num_samples` of times.\n local_dir (str): Local dir to save training results to.\n Defaults to ``~/ray_results``.\n upload_dir (str): Optional URI to sync training results\n to (e.g. 
``s3://bucket``).\n trial_name_creator (func): Optional function for generating\n the trial string representation.\n loggers (list): List of logger creators to be used with\n each Trial. If None, defaults to ray.tune.logger.DEFAULT_LOGGERS.\n See `ray/tune/logger.py`.\n sync_to_cloud (func|str): Function for syncing the local_dir to and\n from upload_dir. If string, then it must be a string template\n that includes `{source}` and `{target}` for the syncer to run.\n If not provided, the sync command defaults to standard\n S3 or gsutil sync comamnds.\n sync_to_driver (func|str): Function for syncing trial logdir from\n remote node to local. If string, then it must be a string template\n that includes `{source}` and `{target}` for the syncer to run.\n If not provided, defaults to using rsync.\n checkpoint_freq (int): How many training iterations between\n checkpoints. A value of 0 (default) disables checkpointing.\n checkpoint_at_end (bool): Whether to checkpoint at the end of the\n experiment regardless of the checkpoint_freq. Default is False.\n global_checkpoint_period (int): Seconds between global checkpointing.\n This does not affect `checkpoint_freq`, which specifies frequency\n for individual trials.\n export_formats (list): List of formats that exported at the end of\n the experiment. Default is None.\n max_failures (int): Try to recover a trial from its last\n checkpoint at least this many times. Only applies if\n checkpointing is enabled. Setting to -1 will lead to infinite\n recovery retries. Defaults to 3.\n restore (str): Path to checkpoint. Only makes sense to set if\n running 1 trial. Defaults to None.\n search_alg (SearchAlgorithm): Search Algorithm. Defaults to\n BasicVariantGenerator.\n scheduler (TrialScheduler): Scheduler for executing\n the experiment. Choose among FIFO (default), MedianStopping,\n AsyncHyperBand, and HyperBand.\n with_server (bool): Starts a background Tune server. Needed for\n using the Client API.\n server_port (int): Port number for launching TuneServer.\n verbose (int): 0, 1, or 2. Verbosity mode. 0 = silent,\n 1 = only status updates, 2 = status and trial results.\n resume (str|bool): One of \"LOCAL\", \"REMOTE\", \"PROMPT\", or bool.\n LOCAL/True restores the checkpoint from the local_checkpoint_dir.\n REMOTE restores the checkpoint from remote_checkpoint_dir.\n PROMPT provides CLI feedback. False forces a new\n experiment. If resume is set but checkpoint does not exist,\n ValueError will be thrown.\n queue_trials (bool): Whether to queue trials when the cluster does\n not currently have enough resources to launch one. This should\n be set to True when running on an autoscaling cluster to enable\n automatic scale-up.\n reuse_actors (bool): Whether to reuse actors between different trials\n when possible. This can drastically speed up experiments that start\n and stop actors often (e.g., PBT in time-multiplexing mode). This\n requires trials to have the same resource requirements.\n trial_executor (TrialExecutor): Manage the execution of trials.\n raise_on_failed_trial (bool): Raise TuneError if there exists failed\n trial (of ERROR state) when the experiments complete.\n ray_auto_init (bool): Automatically starts a local Ray cluster\n if using a RayTrialExecutor (which is the default) and\n if Ray is not initialized. Defaults to True.\n sync_function: Deprecated. 
See `sync_to_cloud` and\n `sync_to_driver`.\n\n Returns:\n List of Trial objects.\n\n Raises:\n TuneError if any trials failed and `raise_on_failed_trial` is True.\n\n Examples:\n >>> tune.run(mytrainable, scheduler=PopulationBasedTraining())\n\n >>> tune.run(mytrainable, num_samples=5, reuse_actors=True)\n\n >>> tune.run(\n \"PG\",\n num_samples=5,\n config={\n \"env\": \"CartPole-v0\",\n \"lr\": tune.sample_from(lambda _: np.random.rand())\n }\n )\n \"\"\"\n trial_executor = trial_executor or RayTrialExecutor(\n queue_trials=queue_trials,\n reuse_actors=reuse_actors,\n ray_auto_init=ray_auto_init)\n experiment = run_or_experiment\n if not isinstance(run_or_experiment, Experiment):\n run_identifier = Experiment._register_if_needed(run_or_experiment)\n experiment = Experiment(\n name=name,\n run=run_identifier,\n stop=stop,\n config=config,\n resources_per_trial=resources_per_trial,\n num_samples=num_samples,\n local_dir=local_dir,\n upload_dir=upload_dir,\n sync_to_driver=sync_to_driver,\n trial_name_creator=trial_name_creator,\n loggers=loggers,\n checkpoint_freq=checkpoint_freq,\n checkpoint_at_end=checkpoint_at_end,\n export_formats=export_formats,\n max_failures=max_failures,\n restore=restore,\n sync_function=sync_function)\n else:\n logger.debug(\"Ignoring some parameters passed into tune.run.\")\n\n if sync_to_cloud:\n assert experiment.remote_checkpoint_dir, (\n \"Need `upload_dir` if `sync_to_cloud` given.\")\n\n runner = TrialRunner(\n search_alg=search_alg or BasicVariantGenerator(),\n scheduler=scheduler or FIFOScheduler(),\n local_checkpoint_dir=experiment.checkpoint_dir,\n remote_checkpoint_dir=experiment.remote_checkpoint_dir,\n sync_to_cloud=sync_to_cloud,\n checkpoint_period=global_checkpoint_period,\n resume=resume,\n launch_web_server=with_server,\n server_port=server_port,\n verbose=bool(verbose > 1),\n trial_executor=trial_executor)\n\n runner.add_experiment(experiment)\n\n if verbose:\n print(runner.debug_string(max_debug=99999))\n\n last_debug = 0\n while not runner.is_finished():\n runner.step()\n if time.time() - last_debug > DEBUG_PRINT_INTERVAL:\n if verbose:\n print(runner.debug_string())\n last_debug = time.time()\n\n if verbose:\n print(runner.debug_string(max_debug=99999))\n\n wait_for_sync()\n\n errored_trials = []\n for trial in runner.get_trials():\n if trial.status != Trial.TERMINATED:\n errored_trials += [trial]\n\n if errored_trials:\n if raise_on_failed_trial:\n raise TuneError(\"Trials did not complete\", errored_trials)\n else:\n logger.error(\"Trials did not complete: %s\", errored_trials)\n\n if return_trials:\n return runner.get_trials()\n return ExperimentAnalysis(experiment.checkpoint_dir)\n\n\ndef run_experiments(experiments,\n search_alg=None,\n scheduler=None,\n with_server=False,\n server_port=TuneServer.DEFAULT_PORT,\n verbose=2,\n resume=False,\n queue_trials=False,\n reuse_actors=False,\n trial_executor=None,\n raise_on_failed_trial=True):\n \"\"\"Runs and blocks until all trials finish.\n\n Examples:\n >>> experiment_spec = Experiment(\"experiment\", my_func)\n >>> run_experiments(experiments=experiment_spec)\n\n >>> experiment_spec = {\"experiment\": {\"run\": my_func}}\n >>> run_experiments(experiments=experiment_spec)\n\n >>> run_experiments(\n >>> experiments=experiment_spec,\n >>> scheduler=MedianStoppingRule(...))\n\n >>> run_experiments(\n >>> experiments=experiment_spec,\n >>> search_alg=SearchAlgorithm(),\n >>> scheduler=MedianStoppingRule(...))\n\n Returns:\n List of Trial objects, holding data for each executed trial.\n\n 
\"\"\"\n # This is important to do this here\n # because it schematize the experiments\n # and it conducts the implicit registration.\n experiments = convert_to_experiment_list(experiments)\n\n trials = []\n for exp in experiments:\n trials += run(\n exp,\n search_alg=search_alg,\n scheduler=scheduler,\n with_server=with_server,\n server_port=server_port,\n verbose=verbose,\n resume=resume,\n queue_trials=queue_trials,\n reuse_actors=reuse_actors,\n trial_executor=trial_executor,\n raise_on_failed_trial=raise_on_failed_trial)\n return trials\n", "path": "python/ray/tune/tune.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport time\n\nfrom ray.tune.error import TuneError\nfrom ray.tune.experiment import convert_to_experiment_list, Experiment\nfrom ray.tune.analysis import ExperimentAnalysis\nfrom ray.tune.suggest import BasicVariantGenerator\nfrom ray.tune.trial import Trial, DEBUG_PRINT_INTERVAL\nfrom ray.tune.ray_trial_executor import RayTrialExecutor\nfrom ray.tune.syncer import wait_for_sync\nfrom ray.tune.trial_runner import TrialRunner\nfrom ray.tune.schedulers import (HyperBandScheduler, AsyncHyperBandScheduler,\n FIFOScheduler, MedianStoppingRule)\nfrom ray.tune.web_server import TuneServer\n\nlogger = logging.getLogger(__name__)\n\n_SCHEDULERS = {\n \"FIFO\": FIFOScheduler,\n \"MedianStopping\": MedianStoppingRule,\n \"HyperBand\": HyperBandScheduler,\n \"AsyncHyperBand\": AsyncHyperBandScheduler,\n}\n\n\ndef _make_scheduler(args):\n if args.scheduler in _SCHEDULERS:\n return _SCHEDULERS[args.scheduler](**args.scheduler_config)\n else:\n raise TuneError(\"Unknown scheduler: {}, should be one of {}\".format(\n args.scheduler, _SCHEDULERS.keys()))\n\n\ndef run(run_or_experiment,\n name=None,\n stop=None,\n config=None,\n resources_per_trial=None,\n num_samples=1,\n local_dir=None,\n upload_dir=None,\n trial_name_creator=None,\n loggers=None,\n sync_to_cloud=None,\n sync_to_driver=None,\n checkpoint_freq=0,\n checkpoint_at_end=False,\n keep_checkpoints_num=None,\n checkpoint_score_attr=None,\n global_checkpoint_period=10,\n export_formats=None,\n max_failures=3,\n restore=None,\n search_alg=None,\n scheduler=None,\n with_server=False,\n server_port=TuneServer.DEFAULT_PORT,\n verbose=2,\n resume=False,\n queue_trials=False,\n reuse_actors=True,\n trial_executor=None,\n raise_on_failed_trial=True,\n return_trials=True,\n ray_auto_init=True,\n sync_function=None):\n \"\"\"Executes training.\n\n Args:\n run_or_experiment (function|class|str|Experiment): If\n function|class|str, this is the algorithm or model to train.\n This may refer to the name of a built-on algorithm\n (e.g. RLLib's DQN or PPO), a user-defined trainable\n function or class, or the string identifier of a\n trainable function or class registered in the tune registry.\n If Experiment, then Tune will execute training based on\n Experiment.spec.\n name (str): Name of experiment.\n stop (dict): The stopping criteria. The keys may be any field in\n the return result of 'train()', whichever is reached first.\n Defaults to empty dict.\n config (dict): Algorithm-specific configuration for Tune variant\n generation (e.g. env, hyperparams). Defaults to empty dict.\n Custom search algorithms may ignore this.\n resources_per_trial (dict): Machine resources to allocate per trial,\n e.g. ``{\"cpu\": 64, \"gpu\": 8}``. Note that GPUs will not be\n assigned unless you specify them here. 
Defaults to 1 CPU and 0\n GPUs in ``Trainable.default_resource_request()``.\n num_samples (int): Number of times to sample from the\n hyperparameter space. Defaults to 1. If `grid_search` is\n provided as an argument, the grid will be repeated\n `num_samples` of times.\n local_dir (str): Local dir to save training results to.\n Defaults to ``~/ray_results``.\n upload_dir (str): Optional URI to sync training results\n to (e.g. ``s3://bucket``).\n trial_name_creator (func): Optional function for generating\n the trial string representation.\n loggers (list): List of logger creators to be used with\n each Trial. If None, defaults to ray.tune.logger.DEFAULT_LOGGERS.\n See `ray/tune/logger.py`.\n sync_to_cloud (func|str): Function for syncing the local_dir to and\n from upload_dir. If string, then it must be a string template\n that includes `{source}` and `{target}` for the syncer to run.\n If not provided, the sync command defaults to standard\n S3 or gsutil sync comamnds.\n sync_to_driver (func|str): Function for syncing trial logdir from\n remote node to local. If string, then it must be a string template\n that includes `{source}` and `{target}` for the syncer to run.\n If not provided, defaults to using rsync.\n checkpoint_freq (int): How many training iterations between\n checkpoints. A value of 0 (default) disables checkpointing.\n checkpoint_at_end (bool): Whether to checkpoint at the end of the\n experiment regardless of the checkpoint_freq. Default is False.\n keep_checkpoints_num (int): Number of checkpoints to keep. A value of\n `None` keeps all checkpoints. Defaults to `None`. If set, need\n to provide `checkpoint_score_attr`.\n checkpoint_score_attr (str): Specifies by which attribute to rank the\n best checkpoint. Default is increasing order. If attribute starts\n with `min-` it will rank attribute in decreasing order, i.e.\n `min-validation_loss`.\n global_checkpoint_period (int): Seconds between global checkpointing.\n This does not affect `checkpoint_freq`, which specifies frequency\n for individual trials.\n export_formats (list): List of formats that exported at the end of\n the experiment. Default is None.\n max_failures (int): Try to recover a trial from its last\n checkpoint at least this many times. Only applies if\n checkpointing is enabled. Setting to -1 will lead to infinite\n recovery retries. Defaults to 3.\n restore (str): Path to checkpoint. Only makes sense to set if\n running 1 trial. Defaults to None.\n search_alg (SearchAlgorithm): Search Algorithm. Defaults to\n BasicVariantGenerator.\n scheduler (TrialScheduler): Scheduler for executing\n the experiment. Choose among FIFO (default), MedianStopping,\n AsyncHyperBand, and HyperBand.\n with_server (bool): Starts a background Tune server. Needed for\n using the Client API.\n server_port (int): Port number for launching TuneServer.\n verbose (int): 0, 1, or 2. Verbosity mode. 0 = silent,\n 1 = only status updates, 2 = status and trial results.\n resume (str|bool): One of \"LOCAL\", \"REMOTE\", \"PROMPT\", or bool.\n LOCAL/True restores the checkpoint from the local_checkpoint_dir.\n REMOTE restores the checkpoint from remote_checkpoint_dir.\n PROMPT provides CLI feedback. False forces a new\n experiment. If resume is set but checkpoint does not exist,\n ValueError will be thrown.\n queue_trials (bool): Whether to queue trials when the cluster does\n not currently have enough resources to launch one. 
This should\n be set to True when running on an autoscaling cluster to enable\n automatic scale-up.\n reuse_actors (bool): Whether to reuse actors between different trials\n when possible. This can drastically speed up experiments that start\n and stop actors often (e.g., PBT in time-multiplexing mode). This\n requires trials to have the same resource requirements.\n trial_executor (TrialExecutor): Manage the execution of trials.\n raise_on_failed_trial (bool): Raise TuneError if there exists failed\n trial (of ERROR state) when the experiments complete.\n ray_auto_init (bool): Automatically starts a local Ray cluster\n if using a RayTrialExecutor (which is the default) and\n if Ray is not initialized. Defaults to True.\n sync_function: Deprecated. See `sync_to_cloud` and\n `sync_to_driver`.\n\n Returns:\n List of Trial objects.\n\n Raises:\n TuneError if any trials failed and `raise_on_failed_trial` is True.\n\n Examples:\n >>> tune.run(mytrainable, scheduler=PopulationBasedTraining())\n\n >>> tune.run(mytrainable, num_samples=5, reuse_actors=True)\n\n >>> tune.run(\n \"PG\",\n num_samples=5,\n config={\n \"env\": \"CartPole-v0\",\n \"lr\": tune.sample_from(lambda _: np.random.rand())\n }\n )\n \"\"\"\n trial_executor = trial_executor or RayTrialExecutor(\n queue_trials=queue_trials,\n reuse_actors=reuse_actors,\n ray_auto_init=ray_auto_init)\n experiment = run_or_experiment\n if not isinstance(run_or_experiment, Experiment):\n run_identifier = Experiment._register_if_needed(run_or_experiment)\n experiment = Experiment(\n name=name,\n run=run_identifier,\n stop=stop,\n config=config,\n resources_per_trial=resources_per_trial,\n num_samples=num_samples,\n local_dir=local_dir,\n upload_dir=upload_dir,\n sync_to_driver=sync_to_driver,\n trial_name_creator=trial_name_creator,\n loggers=loggers,\n checkpoint_freq=checkpoint_freq,\n checkpoint_at_end=checkpoint_at_end,\n keep_checkpoints_num=keep_checkpoints_num,\n checkpoint_score_attr=checkpoint_score_attr,\n export_formats=export_formats,\n max_failures=max_failures,\n restore=restore,\n sync_function=sync_function)\n else:\n logger.debug(\"Ignoring some parameters passed into tune.run.\")\n\n if sync_to_cloud:\n assert experiment.remote_checkpoint_dir, (\n \"Need `upload_dir` if `sync_to_cloud` given.\")\n\n runner = TrialRunner(\n search_alg=search_alg or BasicVariantGenerator(),\n scheduler=scheduler or FIFOScheduler(),\n local_checkpoint_dir=experiment.checkpoint_dir,\n remote_checkpoint_dir=experiment.remote_checkpoint_dir,\n sync_to_cloud=sync_to_cloud,\n checkpoint_period=global_checkpoint_period,\n resume=resume,\n launch_web_server=with_server,\n server_port=server_port,\n verbose=bool(verbose > 1),\n trial_executor=trial_executor)\n\n runner.add_experiment(experiment)\n\n if verbose:\n print(runner.debug_string(max_debug=99999))\n\n last_debug = 0\n while not runner.is_finished():\n runner.step()\n if time.time() - last_debug > DEBUG_PRINT_INTERVAL:\n if verbose:\n print(runner.debug_string())\n last_debug = time.time()\n\n if verbose:\n print(runner.debug_string(max_debug=99999))\n\n wait_for_sync()\n\n errored_trials = []\n for trial in runner.get_trials():\n if trial.status != Trial.TERMINATED:\n errored_trials += [trial]\n\n if errored_trials:\n if raise_on_failed_trial:\n raise TuneError(\"Trials did not complete\", errored_trials)\n else:\n logger.error(\"Trials did not complete: %s\", errored_trials)\n\n if return_trials:\n return runner.get_trials()\n return ExperimentAnalysis(experiment.checkpoint_dir)\n\n\ndef 
run_experiments(experiments,\n search_alg=None,\n scheduler=None,\n with_server=False,\n server_port=TuneServer.DEFAULT_PORT,\n verbose=2,\n resume=False,\n queue_trials=False,\n reuse_actors=False,\n trial_executor=None,\n raise_on_failed_trial=True):\n \"\"\"Runs and blocks until all trials finish.\n\n Examples:\n >>> experiment_spec = Experiment(\"experiment\", my_func)\n >>> run_experiments(experiments=experiment_spec)\n\n >>> experiment_spec = {\"experiment\": {\"run\": my_func}}\n >>> run_experiments(experiments=experiment_spec)\n\n >>> run_experiments(\n >>> experiments=experiment_spec,\n >>> scheduler=MedianStoppingRule(...))\n\n >>> run_experiments(\n >>> experiments=experiment_spec,\n >>> search_alg=SearchAlgorithm(),\n >>> scheduler=MedianStoppingRule(...))\n\n Returns:\n List of Trial objects, holding data for each executed trial.\n\n \"\"\"\n # This is important to do this here\n # because it schematize the experiments\n # and it conducts the implicit registration.\n experiments = convert_to_experiment_list(experiments)\n\n trials = []\n for exp in experiments:\n trials += run(\n exp,\n search_alg=search_alg,\n scheduler=scheduler,\n with_server=with_server,\n server_port=server_port,\n verbose=verbose,\n resume=resume,\n queue_trials=queue_trials,\n reuse_actors=reuse_actors,\n trial_executor=trial_executor,\n raise_on_failed_trial=raise_on_failed_trial)\n return trials\n", "path": "python/ray/tune/tune.py"}]} | 3,884 | 378 |
gh_patches_debug_7669 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-460 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
FieldError when accessing event
### Describe the Bug
<!-- A clear and concise description of what the bug is. -->
When opening the editor page for an event, a `FieldError` occurs.
### Steps to Reproduce
1. Go to 'Events'
2. Click on an event or on the 'Create event' button
3. Error occurs (see screenshot below)
### Expected Behavior
<!-- A clear and concise description of what you expected to happen. -->
Event editor page opens up
### Actual Behavior
<!-- A clear and concise description of what actually happened. -->
Error page is loaded
### Additional Information
<!-- Add any other context (e.g. logs, screenshots, etc.) about the problem here. -->

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cms/views/events/event_view.py`
Content:
```
1 import logging
2
3 from django.contrib import messages
4 from django.contrib.auth.decorators import login_required
5 from django.contrib.auth.mixins import PermissionRequiredMixin
6 from django.core.exceptions import PermissionDenied
7 from django.shortcuts import render, redirect
8 from django.utils.decorators import method_decorator
9 from django.utils.translation import ugettext as _
10 from django.views.generic import TemplateView
11
12 from ...constants import status
13 from ...decorators import region_permission_required
14 from ...forms.events import EventForm, EventTranslationForm, RecurrenceRuleForm
15 from ...models import Region, Language, Event, EventTranslation, RecurrenceRule, POI
16
17 logger = logging.getLogger(__name__)
18
19
20 @method_decorator(login_required, name="dispatch")
21 @method_decorator(region_permission_required, name="dispatch")
22 class EventView(PermissionRequiredMixin, TemplateView):
23 permission_required = "cms.view_events"
24 raise_exception = True
25
26 template_name = "events/event_form.html"
27
28 # pylint: disable=too-many-locals
29 def get(self, request, *args, **kwargs):
30 language = Language.objects.get(code=kwargs.get("language_code"))
31
32 # get event and event translation objects if they exist, otherwise objects are None
33 event_instance = Event.objects.filter(id=kwargs.get("event_id")).first()
34 event_translation_instance = EventTranslation.objects.filter(
35 event=event_instance, language=language
36 ).first()
37 recurrence_rule_instance = RecurrenceRule.objects.filter(
38 event=event_instance
39 ).first()
40 poi_instance = POI.objects.filter(event=event_instance).first()
41
42 # Make form disabled if user has no permission to edit the page
43 if not request.user.has_perm("cms.edit_events"):
44 disabled = True
45 messages.warning(
46 request, _("You don't have the permission to edit this event.")
47 )
48 elif event_instance and event_instance.archived:
49 disabled = True
50 messages.warning(
51 request, _("You cannot edit this event because it is archived.")
52 )
53 else:
54 disabled = False
55
56 event_form = EventForm(instance=event_instance, disabled=disabled)
57 event_translation_form = EventTranslationForm(
58 instance=event_translation_instance, disabled=disabled
59 )
60 recurrence_rule_form = RecurrenceRuleForm(
61 instance=recurrence_rule_instance, disabled=disabled
62 )
63
64 return render(
65 request,
66 self.template_name,
67 {
68 "current_menu_item": "events",
69 "event_form": event_form,
70 "event_translation_form": event_translation_form,
71 "recurrence_rule_form": recurrence_rule_form,
72 "poi": poi_instance,
73 "language": language,
74 "languages": Region.get_current_region(request).languages
75 if event_instance
76 else [language],
77 },
78 )
79
80 # pylint: disable=too-many-locals,too-many-branches
81 def post(self, request, **kwargs):
82 region = Region.objects.get(slug=kwargs.get("region_slug"))
83 language = Language.objects.get(code=kwargs.get("language_code"))
84 poi = POI.objects.filter(id=request.POST.get("poi_id")).first()
85
86 event_instance = Event.objects.filter(id=kwargs.get("event_id")).first()
87 recurrence_rule_instance = RecurrenceRule.objects.filter(
88 event=event_instance
89 ).first()
90 event_translation_instance = EventTranslation.objects.filter(
91 event=event_instance, language=language
92 ).first()
93
94 if not request.user.has_perm("cms.edit_events"):
95 raise PermissionDenied
96
97 event_form = EventForm(data=request.POST, instance=event_instance,)
98 # clean data of event form to be able to pass the cleaned start date to the recurrence form for validation
99 event_form_valid = event_form.is_valid()
100 recurrence_rule_form = RecurrenceRuleForm(
101 data=request.POST,
102 instance=recurrence_rule_instance,
103 event_start_date=event_form.cleaned_data.get("start_date", None),
104 )
105 event_translation_form = EventTranslationForm(
106 data=request.POST,
107 instance=event_translation_instance,
108 region=region,
109 language=language,
110 )
111
112 if (
113 not event_form_valid
114 or not event_translation_form.is_valid()
115 or (
116 event_form.cleaned_data["is_recurring"]
117 and not recurrence_rule_form.is_valid()
118 )
119 ):
120 forms = [event_form, event_translation_form]
121 if event_form.cleaned_data["is_recurring"]:
122 forms.append(recurrence_rule_form)
123 # Add error messages
124 for form in forms:
125 for field in form:
126 for error in field.errors:
127 messages.error(request, _(error))
128 for error in form.non_field_errors():
129 messages.error(request, _(error))
130
131 elif (
132 not event_form.has_changed()
133 and not event_translation_form.has_changed()
134 and (
135 not event_form.cleaned_data["is_recurring"]
136 or not recurrence_rule_form.has_changed()
137 )
138 and poi == event_instance.location
139 ):
140
141 messages.info(request, _("No changes detected."))
142
143 else:
144
145 if event_translation_form.instance.status == status.PUBLIC:
146 if not request.user.has_perm("cms.publish_events"):
147 raise PermissionDenied
148
149 if event_form.cleaned_data["is_recurring"]:
150 recurrence_rule = recurrence_rule_form.save()
151 else:
152 recurrence_rule = None
153
154 event = event_form.save(
155 region=region, recurrence_rule=recurrence_rule, location=poi
156 )
157 event_translation = event_translation_form.save(
158 event=event, user=request.user
159 )
160
161 published = event_translation.status == status.PUBLIC
162 if not event_instance:
163 if published:
164 messages.success(
165 request, _("Event was successfully created and published.")
166 )
167 else:
168 messages.success(request, _("Event was successfully created."))
169 return redirect(
170 "edit_event",
171 **{
172 "event_id": event.id,
173 "region_slug": region.slug,
174 "language_code": language.code,
175 }
176 )
177 if not event_translation_instance:
178 if published:
179 messages.success(
180 request,
181 _("Event translation was successfully created and published."),
182 )
183 else:
184 messages.success(
185 request, _("Event translation was successfully created.")
186 )
187 else:
188 if published:
189 messages.success(request, _("Event was successfully published."))
190 else:
191 messages.success(request, _("Event was successfully saved."))
192
193 return render(
194 request,
195 self.template_name,
196 {
197 "current_menu_item": "events",
198 "event_form": event_form,
199 "event_translation_form": event_translation_form,
200 "recurrence_rule_form": recurrence_rule_form,
201 "poi": poi,
202 "language": language,
203 "languages": region.languages if event_instance else [language],
204 },
205 )
206
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cms/views/events/event_view.py b/src/cms/views/events/event_view.py
--- a/src/cms/views/events/event_view.py
+++ b/src/cms/views/events/event_view.py
@@ -37,7 +37,7 @@
recurrence_rule_instance = RecurrenceRule.objects.filter(
event=event_instance
).first()
- poi_instance = POI.objects.filter(event=event_instance).first()
+ poi_instance = POI.objects.filter(events=event_instance).first()
# Make form disabled if user has no permission to edit the page
if not request.user.has_perm("cms.edit_events"):
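A side note on why this one-word change is enough: the reverse relation from `POI` back to `Event` is exposed under the `related_name` declared on the foreign key (evidently `events`), so `POI.objects.filter(event=...)` has no matching keyword and Django raises the `FieldError` shown in the screenshot. The sketch below reproduces that behaviour with minimal stand-in models; the field definitions are assumptions for illustration, not the real integreat-cms models.

```python
# Stand-in models; the real integreat-cms fields are assumed to look roughly like this.
import django
from django.conf import settings

settings.configure(
    INSTALLED_APPS=[],
    DATABASES={"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}},
)
django.setup()

from django.db import models


class POI(models.Model):
    name = models.CharField(max_length=250)

    class Meta:
        app_label = "cms_sketch"


class Event(models.Model):
    # The reverse filter keyword on POI is taken from related_name, not the model name.
    location = models.ForeignKey(
        POI, null=True, on_delete=models.SET_NULL, related_name="events"
    )

    class Meta:
        app_label = "cms_sketch"


# POI.objects.filter(event=some_event)   # FieldError: cannot resolve keyword 'event'
# POI.objects.filter(events=some_event)  # valid: reverse lookup via the related_name
```

Because the keyword is resolved when the queryset is built, the wrong name fails immediately in the view, which matches the error page described in the issue.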
| {"golden_diff": "diff --git a/src/cms/views/events/event_view.py b/src/cms/views/events/event_view.py\n--- a/src/cms/views/events/event_view.py\n+++ b/src/cms/views/events/event_view.py\n@@ -37,7 +37,7 @@\n recurrence_rule_instance = RecurrenceRule.objects.filter(\n event=event_instance\n ).first()\n- poi_instance = POI.objects.filter(event=event_instance).first()\n+ poi_instance = POI.objects.filter(events=event_instance).first()\n \n # Make form disabled if user has no permission to edit the page\n if not request.user.has_perm(\"cms.edit_events\"):\n", "issue": "FieldError when accessing event\n### Describe the Bug\r\n<!-- A clear and concise description of what the bug is. -->\r\nWhen opening the editor page for an event, a `FieldError` occurs.\r\n\r\n\r\n### Steps to Reproduce\r\n\r\n1. Go to 'Events'\r\n2. Click on an event or on the 'Create event' button\r\n3. Error occurs (see screenshot below)\r\n\r\n### Expected Behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nEvent editor page opens up\r\n\r\n\r\n### Actual Behavior\r\n<!-- A clear and concise description of what actually happened. -->\r\nError page is loaded \r\n\r\n\r\n### Additional Information\r\n<!-- Add any other context (e.g. logs, screenshots, etc.) about the problem here. -->\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import logging\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import render, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import TemplateView\n\nfrom ...constants import status\nfrom ...decorators import region_permission_required\nfrom ...forms.events import EventForm, EventTranslationForm, RecurrenceRuleForm\nfrom ...models import Region, Language, Event, EventTranslation, RecurrenceRule, POI\n\nlogger = logging.getLogger(__name__)\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(region_permission_required, name=\"dispatch\")\nclass EventView(PermissionRequiredMixin, TemplateView):\n permission_required = \"cms.view_events\"\n raise_exception = True\n\n template_name = \"events/event_form.html\"\n\n # pylint: disable=too-many-locals\n def get(self, request, *args, **kwargs):\n language = Language.objects.get(code=kwargs.get(\"language_code\"))\n\n # get event and event translation objects if they exist, otherwise objects are None\n event_instance = Event.objects.filter(id=kwargs.get(\"event_id\")).first()\n event_translation_instance = EventTranslation.objects.filter(\n event=event_instance, language=language\n ).first()\n recurrence_rule_instance = RecurrenceRule.objects.filter(\n event=event_instance\n ).first()\n poi_instance = POI.objects.filter(event=event_instance).first()\n\n # Make form disabled if user has no permission to edit the page\n if not request.user.has_perm(\"cms.edit_events\"):\n disabled = True\n messages.warning(\n request, _(\"You don't have the permission to edit this event.\")\n )\n elif event_instance and event_instance.archived:\n disabled = True\n messages.warning(\n request, _(\"You cannot edit this event because it is archived.\")\n )\n else:\n disabled = False\n\n event_form = EventForm(instance=event_instance, disabled=disabled)\n event_translation_form = EventTranslationForm(\n instance=event_translation_instance, 
disabled=disabled\n )\n recurrence_rule_form = RecurrenceRuleForm(\n instance=recurrence_rule_instance, disabled=disabled\n )\n\n return render(\n request,\n self.template_name,\n {\n \"current_menu_item\": \"events\",\n \"event_form\": event_form,\n \"event_translation_form\": event_translation_form,\n \"recurrence_rule_form\": recurrence_rule_form,\n \"poi\": poi_instance,\n \"language\": language,\n \"languages\": Region.get_current_region(request).languages\n if event_instance\n else [language],\n },\n )\n\n # pylint: disable=too-many-locals,too-many-branches\n def post(self, request, **kwargs):\n region = Region.objects.get(slug=kwargs.get(\"region_slug\"))\n language = Language.objects.get(code=kwargs.get(\"language_code\"))\n poi = POI.objects.filter(id=request.POST.get(\"poi_id\")).first()\n\n event_instance = Event.objects.filter(id=kwargs.get(\"event_id\")).first()\n recurrence_rule_instance = RecurrenceRule.objects.filter(\n event=event_instance\n ).first()\n event_translation_instance = EventTranslation.objects.filter(\n event=event_instance, language=language\n ).first()\n\n if not request.user.has_perm(\"cms.edit_events\"):\n raise PermissionDenied\n\n event_form = EventForm(data=request.POST, instance=event_instance,)\n # clean data of event form to be able to pass the cleaned start date to the recurrence form for validation\n event_form_valid = event_form.is_valid()\n recurrence_rule_form = RecurrenceRuleForm(\n data=request.POST,\n instance=recurrence_rule_instance,\n event_start_date=event_form.cleaned_data.get(\"start_date\", None),\n )\n event_translation_form = EventTranslationForm(\n data=request.POST,\n instance=event_translation_instance,\n region=region,\n language=language,\n )\n\n if (\n not event_form_valid\n or not event_translation_form.is_valid()\n or (\n event_form.cleaned_data[\"is_recurring\"]\n and not recurrence_rule_form.is_valid()\n )\n ):\n forms = [event_form, event_translation_form]\n if event_form.cleaned_data[\"is_recurring\"]:\n forms.append(recurrence_rule_form)\n # Add error messages\n for form in forms:\n for field in form:\n for error in field.errors:\n messages.error(request, _(error))\n for error in form.non_field_errors():\n messages.error(request, _(error))\n\n elif (\n not event_form.has_changed()\n and not event_translation_form.has_changed()\n and (\n not event_form.cleaned_data[\"is_recurring\"]\n or not recurrence_rule_form.has_changed()\n )\n and poi == event_instance.location\n ):\n\n messages.info(request, _(\"No changes detected.\"))\n\n else:\n\n if event_translation_form.instance.status == status.PUBLIC:\n if not request.user.has_perm(\"cms.publish_events\"):\n raise PermissionDenied\n\n if event_form.cleaned_data[\"is_recurring\"]:\n recurrence_rule = recurrence_rule_form.save()\n else:\n recurrence_rule = None\n\n event = event_form.save(\n region=region, recurrence_rule=recurrence_rule, location=poi\n )\n event_translation = event_translation_form.save(\n event=event, user=request.user\n )\n\n published = event_translation.status == status.PUBLIC\n if not event_instance:\n if published:\n messages.success(\n request, _(\"Event was successfully created and published.\")\n )\n else:\n messages.success(request, _(\"Event was successfully created.\"))\n return redirect(\n \"edit_event\",\n **{\n \"event_id\": event.id,\n \"region_slug\": region.slug,\n \"language_code\": language.code,\n }\n )\n if not event_translation_instance:\n if published:\n messages.success(\n request,\n _(\"Event translation was successfully created and 
published.\"),\n )\n else:\n messages.success(\n request, _(\"Event translation was successfully created.\")\n )\n else:\n if published:\n messages.success(request, _(\"Event was successfully published.\"))\n else:\n messages.success(request, _(\"Event was successfully saved.\"))\n\n return render(\n request,\n self.template_name,\n {\n \"current_menu_item\": \"events\",\n \"event_form\": event_form,\n \"event_translation_form\": event_translation_form,\n \"recurrence_rule_form\": recurrence_rule_form,\n \"poi\": poi,\n \"language\": language,\n \"languages\": region.languages if event_instance else [language],\n },\n )\n", "path": "src/cms/views/events/event_view.py"}], "after_files": [{"content": "import logging\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import render, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import TemplateView\n\nfrom ...constants import status\nfrom ...decorators import region_permission_required\nfrom ...forms.events import EventForm, EventTranslationForm, RecurrenceRuleForm\nfrom ...models import Region, Language, Event, EventTranslation, RecurrenceRule, POI\n\nlogger = logging.getLogger(__name__)\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(region_permission_required, name=\"dispatch\")\nclass EventView(PermissionRequiredMixin, TemplateView):\n permission_required = \"cms.view_events\"\n raise_exception = True\n\n template_name = \"events/event_form.html\"\n\n # pylint: disable=too-many-locals\n def get(self, request, *args, **kwargs):\n language = Language.objects.get(code=kwargs.get(\"language_code\"))\n\n # get event and event translation objects if they exist, otherwise objects are None\n event_instance = Event.objects.filter(id=kwargs.get(\"event_id\")).first()\n event_translation_instance = EventTranslation.objects.filter(\n event=event_instance, language=language\n ).first()\n recurrence_rule_instance = RecurrenceRule.objects.filter(\n event=event_instance\n ).first()\n poi_instance = POI.objects.filter(events=event_instance).first()\n\n # Make form disabled if user has no permission to edit the page\n if not request.user.has_perm(\"cms.edit_events\"):\n disabled = True\n messages.warning(\n request, _(\"You don't have the permission to edit this event.\")\n )\n elif event_instance and event_instance.archived:\n disabled = True\n messages.warning(\n request, _(\"You cannot edit this event because it is archived.\")\n )\n else:\n disabled = False\n\n event_form = EventForm(instance=event_instance, disabled=disabled)\n event_translation_form = EventTranslationForm(\n instance=event_translation_instance, disabled=disabled\n )\n recurrence_rule_form = RecurrenceRuleForm(\n instance=recurrence_rule_instance, disabled=disabled\n )\n\n return render(\n request,\n self.template_name,\n {\n \"current_menu_item\": \"events\",\n \"event_form\": event_form,\n \"event_translation_form\": event_translation_form,\n \"recurrence_rule_form\": recurrence_rule_form,\n \"poi\": poi_instance,\n \"language\": language,\n \"languages\": Region.get_current_region(request).languages\n if event_instance\n else [language],\n },\n )\n\n # pylint: disable=too-many-locals,too-many-branches\n def post(self, request, **kwargs):\n region = 
Region.objects.get(slug=kwargs.get(\"region_slug\"))\n language = Language.objects.get(code=kwargs.get(\"language_code\"))\n poi = POI.objects.filter(id=request.POST.get(\"poi_id\")).first()\n\n event_instance = Event.objects.filter(id=kwargs.get(\"event_id\")).first()\n recurrence_rule_instance = RecurrenceRule.objects.filter(\n event=event_instance\n ).first()\n event_translation_instance = EventTranslation.objects.filter(\n event=event_instance, language=language\n ).first()\n\n if not request.user.has_perm(\"cms.edit_events\"):\n raise PermissionDenied\n\n event_form = EventForm(data=request.POST, instance=event_instance,)\n # clean data of event form to be able to pass the cleaned start date to the recurrence form for validation\n event_form_valid = event_form.is_valid()\n recurrence_rule_form = RecurrenceRuleForm(\n data=request.POST,\n instance=recurrence_rule_instance,\n event_start_date=event_form.cleaned_data.get(\"start_date\", None),\n )\n event_translation_form = EventTranslationForm(\n data=request.POST,\n instance=event_translation_instance,\n region=region,\n language=language,\n )\n\n if (\n not event_form_valid\n or not event_translation_form.is_valid()\n or (\n event_form.cleaned_data[\"is_recurring\"]\n and not recurrence_rule_form.is_valid()\n )\n ):\n forms = [event_form, event_translation_form]\n if event_form.cleaned_data[\"is_recurring\"]:\n forms.append(recurrence_rule_form)\n # Add error messages\n for form in forms:\n for field in form:\n for error in field.errors:\n messages.error(request, _(error))\n for error in form.non_field_errors():\n messages.error(request, _(error))\n\n elif (\n not event_form.has_changed()\n and not event_translation_form.has_changed()\n and (\n not event_form.cleaned_data[\"is_recurring\"]\n or not recurrence_rule_form.has_changed()\n )\n and poi == event_instance.location\n ):\n\n messages.info(request, _(\"No changes detected.\"))\n\n else:\n\n if event_translation_form.instance.status == status.PUBLIC:\n if not request.user.has_perm(\"cms.publish_events\"):\n raise PermissionDenied\n\n if event_form.cleaned_data[\"is_recurring\"]:\n recurrence_rule = recurrence_rule_form.save()\n else:\n recurrence_rule = None\n\n event = event_form.save(\n region=region, recurrence_rule=recurrence_rule, location=poi\n )\n event_translation = event_translation_form.save(\n event=event, user=request.user\n )\n\n published = event_translation.status == status.PUBLIC\n if not event_instance:\n if published:\n messages.success(\n request, _(\"Event was successfully created and published.\")\n )\n else:\n messages.success(request, _(\"Event was successfully created.\"))\n return redirect(\n \"edit_event\",\n **{\n \"event_id\": event.id,\n \"region_slug\": region.slug,\n \"language_code\": language.code,\n }\n )\n if not event_translation_instance:\n if published:\n messages.success(\n request,\n _(\"Event translation was successfully created and published.\"),\n )\n else:\n messages.success(\n request, _(\"Event translation was successfully created.\")\n )\n else:\n if published:\n messages.success(request, _(\"Event was successfully published.\"))\n else:\n messages.success(request, _(\"Event was successfully saved.\"))\n\n return render(\n request,\n self.template_name,\n {\n \"current_menu_item\": \"events\",\n \"event_form\": event_form,\n \"event_translation_form\": event_translation_form,\n \"recurrence_rule_form\": recurrence_rule_form,\n \"poi\": poi,\n \"language\": language,\n \"languages\": region.languages if event_instance else 
[language],\n },\n )\n", "path": "src/cms/views/events/event_view.py"}]} | 2,402 | 130 |
gh_patches_debug_126 | rasdani/github-patches | git_diff | holoviz__panel-3990 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Clearing value of a DatetimePicker
#### Description of expected behavior and the observed behavior
Not sure if this is a bug or a new feature request for Panel. Let's say I have a layout consisting of a button named "Edit", a DatetimePicker disabled with no default value, and a button named "Submit". At the time of initialization, the value of the DatetimePicker is Null. The way these objects interact is as follows:
- Click "Edit" button, DatetimePicker is enabled so user can select a specific time value.
- Click "Submit" button, the selected time value will be pushed to the DB, and the DatetimePicker will be disabled and reset back to Null.
I have tried several ways with no success in clearing the value of the DatetimePicker.
#### Complete, minimal, self-contained example code that reproduces the issue
```
time_widget = pn.widgets.DatetimePicker(disabled=True)
time_widget.value = now()
# how to set value back to None?
time_widget.value = None/pandas.NaT/np.nan => all causes error
```
--- END ISSUE ---
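As an aside before the code listings, here is a rough sketch of the Edit/Submit workflow described in the issue, written with plain Panel widgets. It assumes a widget whose value can be set back to `None` (the subject of this issue), and `push_to_db` is an invented placeholder for whatever persistence layer the application actually uses.

```python
import datetime
import panel as pn

pn.extension()

picker = pn.widgets.DatetimePicker(name="Time", disabled=True)
edit = pn.widgets.Button(name="Edit")
submit = pn.widgets.Button(name="Submit", button_type="primary")


def push_to_db(value: datetime.datetime) -> None:
    print(f"stored {value}")          # stand-in for the real DB write


def on_edit(event) -> None:
    picker.disabled = False           # let the user pick a time


def on_submit(event) -> None:
    if picker.value is not None:
        push_to_db(picker.value)
    picker.value = None               # only works once the widget accepts None
    picker.disabled = True


edit.on_click(on_edit)
submit.on_click(on_submit)
layout = pn.Row(edit, picker, submit)
```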
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `panel/models/datetime_picker.py`
Content:
```
1 from bokeh.core.enums import CalendarPosition
2 from bokeh.core.properties import (
3 Bool, Date, Datetime, Either, Enum, List, Nullable, String, Tuple,
4 )
5 from bokeh.models.widgets.inputs import InputWidget
6
7
8 class DatetimePicker(InputWidget):
9 ''' Calendar-based date picker widget.
10
11 '''
12
13 value = String(help="""
14 The initial or picked date.
15 """)
16
17 min_date = Nullable(Either(Date, Datetime), help="""
18 Optional earliest allowable date.
19 """)
20
21 max_date = Nullable(Either(Date, Datetime), help="""
22 Optional latest allowable date.
23 """)
24
25 disabled_dates = List(Either(Date, Datetime, Tuple(Date, Date), Tuple(Datetime, Datetime)), default=[], help="""
26 A list of dates of ``(start, end)`` date ranges to make unavailable for
27 selection. All other dates will be avalable.
28
29 .. note::
30 Only one of ``disabled_dates`` and ``enabled_dates`` should be specified.
31 """)
32
33 enabled_dates = List(Either(Date, Datetime, Tuple(Date, Date), Tuple(Datetime, Datetime)), default=[], help="""
34 A list of dates of ``(start, end)`` date ranges to make available for
35 selection. All other dates will be unavailable.
36
37 .. note::
38 Only one of ``disabled_dates`` and ``enabled_dates`` should be specified.
39 """)
40
41 position = Enum(CalendarPosition, default="auto", help="""
42 Where the calendar is rendered relative to the input when ``inline`` is False.
43 """)
44
45 inline = Bool(default=False, help="""
46 Whether the calendar sholud be displayed inline.
47 """)
48
49 enable_time = Bool(default=True)
50
51 enable_seconds = Bool(default=True)
52
53 military_time = Bool(default=True)
54
55 date_format = String("Y-m-d H:i:S")
56
57 mode = String(default="single", help="""
58 Should either be "single" or "range".""")
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/panel/models/datetime_picker.py b/panel/models/datetime_picker.py
--- a/panel/models/datetime_picker.py
+++ b/panel/models/datetime_picker.py
@@ -10,7 +10,7 @@
'''
- value = String(help="""
+ value = Nullable(String, help="""
The initial or picked date.
""")
| {"golden_diff": "diff --git a/panel/models/datetime_picker.py b/panel/models/datetime_picker.py\n--- a/panel/models/datetime_picker.py\n+++ b/panel/models/datetime_picker.py\n@@ -10,7 +10,7 @@\n \n '''\n \n- value = String(help=\"\"\"\n+ value = Nullable(String, help=\"\"\"\n The initial or picked date.\n \"\"\")\n", "issue": "Clearing value of a DatetimePicker\n#### Description of expected behavior and the observed behavior\r\nNot sure if this is a bug or a new feature to Panel. Let's say I have a layout consisting of a button named \"Edit\", a DatetimePicker disabled with no default value, and a button named \"Submit\". At the time of initialization, the value of DatetimePicker is Null. The way these objects interact is as follows:\r\n- Click \"Edit\" button, DatetimePicker is enabled so user can select a specific time value.\r\n- Click \"Submit\" button, the selected time value will be pushed to the DB, and the DatetimePicker will be disabled and reset back to Null.\r\n\r\nI have tried several ways with no success in clearing the value of the DatetimePicker.\r\n\r\n#### Complete, minimal, self-contained example code that reproduces the issue\r\n\r\n```\r\ntime_widget = pn.widgets.DatetimePicker(disabled=True)\r\ntime_widget.value = now()\r\n\r\n# how to set value back to None?\r\ntime_widget.value = None/pandas.NaT/np.nan => all causes error\r\n```\r\n\n", "before_files": [{"content": "from bokeh.core.enums import CalendarPosition\nfrom bokeh.core.properties import (\n Bool, Date, Datetime, Either, Enum, List, Nullable, String, Tuple,\n)\nfrom bokeh.models.widgets.inputs import InputWidget\n\n\nclass DatetimePicker(InputWidget):\n ''' Calendar-based date picker widget.\n\n '''\n\n value = String(help=\"\"\"\n The initial or picked date.\n \"\"\")\n\n min_date = Nullable(Either(Date, Datetime), help=\"\"\"\n Optional earliest allowable date.\n \"\"\")\n\n max_date = Nullable(Either(Date, Datetime), help=\"\"\"\n Optional latest allowable date.\n \"\"\")\n\n disabled_dates = List(Either(Date, Datetime, Tuple(Date, Date), Tuple(Datetime, Datetime)), default=[], help=\"\"\"\n A list of dates of ``(start, end)`` date ranges to make unavailable for\n selection. All other dates will be avalable.\n\n .. note::\n Only one of ``disabled_dates`` and ``enabled_dates`` should be specified.\n \"\"\")\n\n enabled_dates = List(Either(Date, Datetime, Tuple(Date, Date), Tuple(Datetime, Datetime)), default=[], help=\"\"\"\n A list of dates of ``(start, end)`` date ranges to make available for\n selection. All other dates will be unavailable.\n\n .. 
note::\n Only one of ``disabled_dates`` and ``enabled_dates`` should be specified.\n \"\"\")\n\n position = Enum(CalendarPosition, default=\"auto\", help=\"\"\"\n Where the calendar is rendered relative to the input when ``inline`` is False.\n \"\"\")\n\n inline = Bool(default=False, help=\"\"\"\n Whether the calendar sholud be displayed inline.\n \"\"\")\n\n enable_time = Bool(default=True)\n\n enable_seconds = Bool(default=True)\n\n military_time = Bool(default=True)\n\n date_format = String(\"Y-m-d H:i:S\")\n\n mode = String(default=\"single\", help=\"\"\"\n Should either be \"single\" or \"range\".\"\"\")\n", "path": "panel/models/datetime_picker.py"}], "after_files": [{"content": "from bokeh.core.enums import CalendarPosition\nfrom bokeh.core.properties import (\n Bool, Date, Datetime, Either, Enum, List, Nullable, String, Tuple,\n)\nfrom bokeh.models.widgets.inputs import InputWidget\n\n\nclass DatetimePicker(InputWidget):\n ''' Calendar-based date picker widget.\n\n '''\n\n value = Nullable(String, help=\"\"\"\n The initial or picked date.\n \"\"\")\n\n min_date = Nullable(Either(Date, Datetime), help=\"\"\"\n Optional earliest allowable date.\n \"\"\")\n\n max_date = Nullable(Either(Date, Datetime), help=\"\"\"\n Optional latest allowable date.\n \"\"\")\n\n disabled_dates = List(Either(Date, Datetime, Tuple(Date, Date), Tuple(Datetime, Datetime)), default=[], help=\"\"\"\n A list of dates of ``(start, end)`` date ranges to make unavailable for\n selection. All other dates will be avalable.\n\n .. note::\n Only one of ``disabled_dates`` and ``enabled_dates`` should be specified.\n \"\"\")\n\n enabled_dates = List(Either(Date, Datetime, Tuple(Date, Date), Tuple(Datetime, Datetime)), default=[], help=\"\"\"\n A list of dates of ``(start, end)`` date ranges to make available for\n selection. All other dates will be unavailable.\n\n .. note::\n Only one of ``disabled_dates`` and ``enabled_dates`` should be specified.\n \"\"\")\n\n position = Enum(CalendarPosition, default=\"auto\", help=\"\"\"\n Where the calendar is rendered relative to the input when ``inline`` is False.\n \"\"\")\n\n inline = Bool(default=False, help=\"\"\"\n Whether the calendar sholud be displayed inline.\n \"\"\")\n\n enable_time = Bool(default=True)\n\n enable_seconds = Bool(default=True)\n\n military_time = Bool(default=True)\n\n date_format = String(\"Y-m-d H:i:S\")\n\n mode = String(default=\"single\", help=\"\"\"\n Should either be \"single\" or \"range\".\"\"\")\n", "path": "panel/models/datetime_picker.py"}]} | 1,024 | 84 |
gh_patches_debug_8704 | rasdani/github-patches | git_diff | sublimelsp__LSP-1557 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[regression] lsp_execute does nothing due to empty session
Since this [commit](https://github.com/sublimelsp/LSP/commit/7d05794fa3cc4ecd3931d09a90e801addc70d9fa) the `capability` class variable has been removed, which means that `self.best_session(self.capability)` is unable to find a session.
The consequence is that the commands defined in [LSP-metals.sublime-commands](https://github.com/scalameta/metals-sublime/blob/master/LSP-metals.sublime-commands) aren't executed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/execute_command.py`
Content:
```
1 import sublime
2 from .core.protocol import Error
3 from .core.protocol import ExecuteCommandParams
4 from .core.registry import LspTextCommand
5 from .core.registry import windows
6 from .core.typing import List, Optional, Any
7 from .core.views import uri_from_view, offset_to_point, region_to_range, text_document_identifier
8
9
10 class LspExecuteCommand(LspTextCommand):
11
12 def run(self,
13 edit: sublime.Edit,
14 command_name: Optional[str] = None,
15 command_args: Optional[List[Any]] = None,
16 session_name: Optional[str] = None,
17 event: Optional[dict] = None) -> None:
18 # Handle VSCode-specific command for triggering AC/sighelp
19 if command_name == "editor.action.triggerSuggest":
20 # Triggered from set_timeout as suggestions popup doesn't trigger otherwise.
21 return sublime.set_timeout(lambda: self.view.run_command("auto_complete"))
22 if command_name == "editor.action.triggerParameterHints":
23
24 def run_async() -> None:
25 listener = windows.listener_for_view(self.view)
26 if listener:
27 listener.do_signature_help_async(manual=False)
28
29 return sublime.set_timeout_async(run_async)
30 session = self.session_by_name(session_name) if session_name else self.best_session(self.capability)
31 if session and command_name:
32 if command_args:
33 self._expand_variables(command_args)
34 params = {"command": command_name} # type: ExecuteCommandParams
35 if command_args:
36 params["arguments"] = command_args
37
38 def handle_response(response: Any) -> None:
39 assert command_name
40 if isinstance(response, Error):
41 sublime.message_dialog("command {} failed. Reason: {}".format(command_name, str(response)))
42 return
43 msg = "command {} completed".format(command_name)
44 if response:
45 msg += "with response: {}".format(response)
46 window = self.view.window()
47 if window:
48 window.status_message(msg)
49
50 session.execute_command(params, progress=True).then(handle_response)
51
52 def _expand_variables(self, command_args: List[Any]) -> None:
53 region = self.view.sel()[0]
54 for i, arg in enumerate(command_args):
55 if arg in ["$document_id", "${document_id}"]:
56 command_args[i] = text_document_identifier(self.view)
57 if arg in ["$file_uri", "${file_uri}"]:
58 command_args[i] = uri_from_view(self.view)
59 elif arg in ["$selection", "${selection}"]:
60 command_args[i] = self.view.substr(region)
61 elif arg in ["$offset", "${offset}"]:
62 command_args[i] = region.b
63 elif arg in ["$selection_begin", "${selection_begin}"]:
64 command_args[i] = region.begin()
65 elif arg in ["$selection_end", "${selection_end}"]:
66 command_args[i] = region.end()
67 elif arg in ["$position", "${position}"]:
68 command_args[i] = offset_to_point(self.view, region.b).to_lsp()
69 elif arg in ["$range", "${range}"]:
70 command_args[i] = region_to_range(self.view, region).to_lsp()
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugin/execute_command.py b/plugin/execute_command.py
--- a/plugin/execute_command.py
+++ b/plugin/execute_command.py
@@ -27,7 +27,7 @@
listener.do_signature_help_async(manual=False)
return sublime.set_timeout_async(run_async)
- session = self.session_by_name(session_name) if session_name else self.best_session(self.capability)
+ session = self.session_by_name(session_name if session_name else self.session_name)
if session and command_name:
if command_args:
self._expand_variables(command_args)
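For context, the regression comes down to which lookup the command uses to find a language-server session: `best_session()` needs a capability string, which this command no longer defines, while `session_by_name()` only needs the configured server name. The fragment below is a framework-free sketch of the fallback the patched line implements; the `Session` class and the registry are invented stand-ins, not the real plugin objects.

```python
from typing import Dict, Optional


class Session:
    """Invented stand-in for an LSP session object."""

    def __init__(self, name: str) -> None:
        self.name = name


class CommandSketch:
    """Mimics the lookup done by the patched LspExecuteCommand.run()."""

    # Concrete commands (e.g. LSP-metals) set this class-level attribute to
    # bind themselves to one language server.
    session_name: Optional[str] = "metals"

    def __init__(self, sessions: Dict[str, Session]) -> None:
        self._sessions = sessions

    def session_by_name(self, name: Optional[str]) -> Optional[Session]:
        return self._sessions.get(name) if name else None

    def resolve(self, session_name: Optional[str] = None) -> Optional[Session]:
        # Patched behaviour: prefer the explicit argument, otherwise fall back
        # to the command's own session_name; no capability string is involved.
        return self.session_by_name(session_name if session_name else self.session_name)


registry = {"metals": Session("metals")}
cmd = CommandSketch(registry)
assert cmd.resolve() is registry["metals"]           # falls back to self.session_name
assert cmd.resolve("metals") is registry["metals"]   # explicit name still works
```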
| {"golden_diff": "diff --git a/plugin/execute_command.py b/plugin/execute_command.py\n--- a/plugin/execute_command.py\n+++ b/plugin/execute_command.py\n@@ -27,7 +27,7 @@\n listener.do_signature_help_async(manual=False)\n \n return sublime.set_timeout_async(run_async)\n- session = self.session_by_name(session_name) if session_name else self.best_session(self.capability)\n+ session = self.session_by_name(session_name if session_name else self.session_name)\n if session and command_name:\n if command_args:\n self._expand_variables(command_args)\n", "issue": "[regression] lsp_execute does nothing due to empty session\nSince this [commit](https://github.com/sublimelsp/LSP/commit/7d05794fa3cc4ecd3931d09a90e801addc70d9fa) the `capability` variable got deleted which means that `self.best_session(self.capability)` is unable to find session.\r\n\r\nThe consequence is that [LSP-metals.sublime-commands](https://github.com/scalameta/metals-sublime/blob/master/LSP-metals.sublime-commands) aren't executed.\r\n\r\n \r\n\n", "before_files": [{"content": "import sublime\nfrom .core.protocol import Error\nfrom .core.protocol import ExecuteCommandParams\nfrom .core.registry import LspTextCommand\nfrom .core.registry import windows\nfrom .core.typing import List, Optional, Any\nfrom .core.views import uri_from_view, offset_to_point, region_to_range, text_document_identifier\n\n\nclass LspExecuteCommand(LspTextCommand):\n\n def run(self,\n edit: sublime.Edit,\n command_name: Optional[str] = None,\n command_args: Optional[List[Any]] = None,\n session_name: Optional[str] = None,\n event: Optional[dict] = None) -> None:\n # Handle VSCode-specific command for triggering AC/sighelp\n if command_name == \"editor.action.triggerSuggest\":\n # Triggered from set_timeout as suggestions popup doesn't trigger otherwise.\n return sublime.set_timeout(lambda: self.view.run_command(\"auto_complete\"))\n if command_name == \"editor.action.triggerParameterHints\":\n\n def run_async() -> None:\n listener = windows.listener_for_view(self.view)\n if listener:\n listener.do_signature_help_async(manual=False)\n\n return sublime.set_timeout_async(run_async)\n session = self.session_by_name(session_name) if session_name else self.best_session(self.capability)\n if session and command_name:\n if command_args:\n self._expand_variables(command_args)\n params = {\"command\": command_name} # type: ExecuteCommandParams\n if command_args:\n params[\"arguments\"] = command_args\n\n def handle_response(response: Any) -> None:\n assert command_name\n if isinstance(response, Error):\n sublime.message_dialog(\"command {} failed. 
Reason: {}\".format(command_name, str(response)))\n return\n msg = \"command {} completed\".format(command_name)\n if response:\n msg += \"with response: {}\".format(response)\n window = self.view.window()\n if window:\n window.status_message(msg)\n\n session.execute_command(params, progress=True).then(handle_response)\n\n def _expand_variables(self, command_args: List[Any]) -> None:\n region = self.view.sel()[0]\n for i, arg in enumerate(command_args):\n if arg in [\"$document_id\", \"${document_id}\"]:\n command_args[i] = text_document_identifier(self.view)\n if arg in [\"$file_uri\", \"${file_uri}\"]:\n command_args[i] = uri_from_view(self.view)\n elif arg in [\"$selection\", \"${selection}\"]:\n command_args[i] = self.view.substr(region)\n elif arg in [\"$offset\", \"${offset}\"]:\n command_args[i] = region.b\n elif arg in [\"$selection_begin\", \"${selection_begin}\"]:\n command_args[i] = region.begin()\n elif arg in [\"$selection_end\", \"${selection_end}\"]:\n command_args[i] = region.end()\n elif arg in [\"$position\", \"${position}\"]:\n command_args[i] = offset_to_point(self.view, region.b).to_lsp()\n elif arg in [\"$range\", \"${range}\"]:\n command_args[i] = region_to_range(self.view, region).to_lsp()\n", "path": "plugin/execute_command.py"}], "after_files": [{"content": "import sublime\nfrom .core.protocol import Error\nfrom .core.protocol import ExecuteCommandParams\nfrom .core.registry import LspTextCommand\nfrom .core.registry import windows\nfrom .core.typing import List, Optional, Any\nfrom .core.views import uri_from_view, offset_to_point, region_to_range, text_document_identifier\n\n\nclass LspExecuteCommand(LspTextCommand):\n\n def run(self,\n edit: sublime.Edit,\n command_name: Optional[str] = None,\n command_args: Optional[List[Any]] = None,\n session_name: Optional[str] = None,\n event: Optional[dict] = None) -> None:\n # Handle VSCode-specific command for triggering AC/sighelp\n if command_name == \"editor.action.triggerSuggest\":\n # Triggered from set_timeout as suggestions popup doesn't trigger otherwise.\n return sublime.set_timeout(lambda: self.view.run_command(\"auto_complete\"))\n if command_name == \"editor.action.triggerParameterHints\":\n\n def run_async() -> None:\n listener = windows.listener_for_view(self.view)\n if listener:\n listener.do_signature_help_async(manual=False)\n\n return sublime.set_timeout_async(run_async)\n session = self.session_by_name(session_name if session_name else self.session_name)\n if session and command_name:\n if command_args:\n self._expand_variables(command_args)\n params = {\"command\": command_name} # type: ExecuteCommandParams\n if command_args:\n params[\"arguments\"] = command_args\n\n def handle_response(response: Any) -> None:\n assert command_name\n if isinstance(response, Error):\n sublime.message_dialog(\"command {} failed. 
Reason: {}\".format(command_name, str(response)))\n return\n msg = \"command {} completed\".format(command_name)\n if response:\n msg += \"with response: {}\".format(response)\n window = self.view.window()\n if window:\n window.status_message(msg)\n\n session.execute_command(params, progress=True).then(handle_response)\n\n def _expand_variables(self, command_args: List[Any]) -> None:\n region = self.view.sel()[0]\n for i, arg in enumerate(command_args):\n if arg in [\"$document_id\", \"${document_id}\"]:\n command_args[i] = text_document_identifier(self.view)\n if arg in [\"$file_uri\", \"${file_uri}\"]:\n command_args[i] = uri_from_view(self.view)\n elif arg in [\"$selection\", \"${selection}\"]:\n command_args[i] = self.view.substr(region)\n elif arg in [\"$offset\", \"${offset}\"]:\n command_args[i] = region.b\n elif arg in [\"$selection_begin\", \"${selection_begin}\"]:\n command_args[i] = region.begin()\n elif arg in [\"$selection_end\", \"${selection_end}\"]:\n command_args[i] = region.end()\n elif arg in [\"$position\", \"${position}\"]:\n command_args[i] = offset_to_point(self.view, region.b).to_lsp()\n elif arg in [\"$range\", \"${range}\"]:\n command_args[i] = region_to_range(self.view, region).to_lsp()\n", "path": "plugin/execute_command.py"}]} | 1,189 | 124 |
gh_patches_debug_12729 | rasdani/github-patches | git_diff | facebookresearch__fairscale-86 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[feat] OSS: Support nvidia's LARC
## 🚀 Feature
Make it possible to support LARC with OSS
## Motivation
LARC is a must-have for large-batch jobs; right now OSS breaks on LARC because of the closure() being passed to the wrapped optimizer's step()
## Pitch
It should be doable to gracefully handle optimizers which do not support closures in step()
## Alternatives
Not supporting LARC reduces a lot of the interest in OSS
## Additional context
cc @mannatsingh @prigoyal @msbaines
--- END ISSUE ---
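A brief note on the mechanics before the code: `OSS.step()` in the file below always forwards `closure=closure` to the wrapped optimizer, and, as the issue says, a LARC-style wrapper falls over because its `step()` takes no such keyword. One way to degrade gracefully is to forward the keyword only when the inner optimizer's signature accepts it, and otherwise evaluate the closure on the wrapper's side. The helper below (named `shard_step` purely for illustration) sketches that technique; it is not necessarily the patch the maintainers adopted.

```python
# Sketch of one way a sharded step could tolerate optimizers whose step() has
# no `closure` keyword (e.g. a LARC-style wrapper). General technique only.
import inspect
from typing import Any, Callable, Optional

import torch


def shard_step(optim: torch.optim.Optimizer,
               closure: Optional[Callable[[], float]] = None,
               **kwargs: Any) -> Optional[float]:
    if closure is None:
        # Nothing to forward: every optimizer supports a bare step().
        return optim.step(**kwargs)

    if "closure" in inspect.signature(optim.step).parameters:
        # The wrapped optimizer understands closures; hand it straight through.
        return optim.step(closure=closure, **kwargs)

    # The wrapped optimizer cannot re-evaluate the model itself, so run the
    # closure here and still return the loss to the caller.
    loss = closure()
    optim.step(**kwargs)
    return loss
```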
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `fairscale/optim/oss.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
2 #
3 # This source code is licensed under the BSD license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 import copy
7 from itertools import chain
8 import logging
9 from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Type
10
11 import torch
12 import torch.distributed as dist
13 from torch.optim import SGD, Optimizer
14
15 from .utils import broadcast_object, recursive_copy_to_device
16
17 if TYPE_CHECKING: # pragma: no cover
18 from torch.optim.optimizer import _params_t
19 else:
20 _params_t = Any
21
22
23 class OSS(Optimizer):
24 """Wraps an arbitrary :class:`optim.Optimizer <torch.optim.Optimizer>`
25 optimizer and shards its state as described by ZeRO_.
26 ::
27 opt = OSS(params, optim=torch.optim.Adam, lr=0.01)
28
29 .. _ZeRO: https://arxiv.org/abs/1910.02054
30
31 We use a greedy algorithm to pack a number of parameters
32 at each rank. Each parameter belongs to a single rank and
33 is not divided among rank.
34
35 After each rank completed their parameter update, they broadcast
36 the new version of the parameters to all other ranks to synchronize
37 the parameters for next round forward/backward computation.
38
39 Args:
40 params (list of tensors):
41 parameters to be optimized
42 Keyword Args:
43 optim (torch.nn.Optimizer):
44 optimizer to shard (default: SGD)
45 group (group):
46 torch.distributed group (default: group.WORLD)
47 """
48
49 optim: Optimizer
50 in_super_constructor: bool
51
52 def __init__(self, params: _params_t, optim: Type[Optimizer] = SGD, group: Any = dist.group.WORLD, **defaults: Any):
53 # Hold all the model params in the root .param_groups
54 self.in_super_constructor = True
55 super().__init__(params, defaults)
56 self.in_super_constructor = False
57
58 # Build the wrapped optimizer, responsible for a shard of the params
59 self.group = group
60 self.rank = dist.get_rank(group)
61 split_param_groups = self.partition_parameters()
62 self.optim = optim(split_param_groups[self.rank], **defaults)
63
64 # Optional consolidated optimizer state
65 self._all_states: List[Dict[str, Any]] = []
66
67 # Current device is set by the parameters allocated to this rank
68 self._device = split_param_groups[self.rank][0]["params"][0].device
69
70 # Sync local and global param_groups keys
71 for global_group, local_group in zip(self.param_groups, self.optim.param_groups):
72 for k, v in local_group.items():
73 if k != "params":
74 global_group[k] = v
75
76 def partition_parameters(self) -> List[List[dict]]:
77 """Partitions parameters across distributed ranks.
78
79 Returns a list of param_groups (which is a list of dict) where each
80 element of the list contains the param_groups for a rank. Element 0
81 corresponds to rank 0, etc. We need all the ranks for the broadcast
82 inside step().
83 """
84 world_size = dist.get_world_size(self.group)
85 param_groups: List[List] = [list() for _ in range(world_size)]
86 sizes = [0] * world_size
87 for param_group in self.param_groups:
88 param_lists: List[List] = [list() for _ in range(world_size)]
89 for param in param_group["params"]:
90 # Add this param to rank with smallest size.
91 rank = sizes.index(min(sizes))
92 param_lists[rank].append(param)
93 sizes[rank] += param.numel()
94 for rank, params in enumerate(param_lists):
95 param_group_rank = copy.copy(param_group)
96 param_group_rank["params"] = params
97 param_groups[rank].append(param_group_rank)
98 return param_groups
99
100 # NOTE(msb) We add a kwargs in order to support Optimizer sub-classes that support extra kwargs.
101 # For example, the apex library contains fused optimizers with a step that supports extra kwargs.
102 def step(self, closure: Optional[Callable[[], float]] = None, **kwargs: Any) -> Optional[float]:
103 # Sync oss param_groups attributes in case they've been updated by a scheduler.
104 self._sync_param_groups()
105
106 # Run the optimizer step on this shard only
107 loss = self.optim.step(closure=closure, **kwargs) # type: ignore
108
109 # Sync all the states. Broadcast requests are issued async, we check completeness before moving on
110 requests = []
111 for rank, param_groups in enumerate(self.partition_parameters()):
112 for param_group in param_groups:
113 for param in param_group["params"]:
114 requests.append(dist.broadcast(tensor=param, src=rank, group=self.group, async_op=True))
115
116 _ = list(map(lambda x: x.wait(), requests))
117 return loss
118
119 def local_state_dict(self) -> dict:
120 """ Gets this rank's state_dict. """
121 return self.optim.state_dict()
122
123 def consolidate_state_dict(self, recipient_rank: int = 0) -> None:
124 """ Update the consolidated state_dict list, one per rank.
125
126 This needs to be called on all replicas """
127
128 # Sync lr and other attributes in case its been updated
129 self._sync_param_groups()
130
131 if self.rank == recipient_rank:
132 # Pull the sharded state from all the other replicas
133 # Store all the states in order, rank by rank
134 logging.debug("Pulling the sharded optimizer state from all replicas")
135 self._all_states = self._collect_sharded_states()
136 else:
137 # Acknowledge broadcasts, and send this rank's shard when needed
138 self._broadcast_state_dict()
139
140 def state_dict(self) -> Dict[str, Any]:
141 """
142 Return the last known global optimizer state, which consist of a list of the shards.
143
144 NOTE: This is limited to the replica which was responsible for the consolidation.
145 The state may also not be up to date, depending on when `consolidate_state_dict` was last called.
146 """
147
148 assert (
149 len(self._all_states) > 0
150 ), "The optimizer state is not materialized, please call consolidate_state_dict on every replica beforehand"
151
152 # Flatten the param_groups, save the partition which logs the rank <> shard correspondence
153 partition: List[Tuple[int, int]] = []
154 param_groups: List[Dict[Any, Any]] = []
155
156 start = 0
157 for i, s in enumerate(self._all_states):
158 param_groups.extend(s["param_groups"])
159 end = start + len(s["param_groups"])
160 partition.append((start, end))
161 start = end
162
163 return {
164 "state": [s["state"] for s in self._all_states],
165 "param_groups": param_groups,
166 "partition": partition,
167 }
168
169 def load_local_state_dict(self, state_dict: dict) -> None:
170 """ Loads this rank's state_dict. """
171
172 self.optim.load_state_dict(state_dict)
173
174 # Workaround PyTorch bug that casts state (https://github.com/pytorch/pytorch/issues/43706)
175 # Copied from https://github.com/pytorch/fairseq/blob/v0.9.0/fairseq/optim/fp16_optimizer.py#L251-L268
176 groups = self.optim.param_groups
177 saved_groups = state_dict["param_groups"]
178 id_map = {
179 old_id: p
180 for old_id, p in zip(chain(*(g["params"] for g in saved_groups)), chain(*(g["params"] for g in groups)))
181 }
182 for k, v in state_dict["state"].items():
183 if k in id_map:
184 param = id_map[k]
185 self.optim.state[param] = recursive_copy_to_device(v, non_blocking=True, device=param.device)
186
187 # Restore the global param_groups (the params themselves are already correct)
188 for global_group, local_group in zip(self.param_groups, groups):
189 for k, v in local_group.items():
190 if k != "params":
191 global_group[k] = v
192
193 def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
194 """ Restore the global parameter groups as well as the shard """
195
196 # Get this optimizer's param_groups shard
197 param_groups = state_dict["param_groups"][
198 state_dict["partition"][self.rank][0] : state_dict["partition"][self.rank][1]
199 ]
200 # Dispatch this rank's state dictionary to the wrapped shard optimizer
201 self.load_local_state_dict({"state": state_dict["state"][self.rank], "param_groups": param_groups})
202
203 def add_param_group(self, param_group: dict) -> None:
204 super().add_param_group(param_group)
205 if not self.in_super_constructor:
206 param_groups = self.partition_parameters()[self.rank]
207 if len(param_groups) == len(self.optim.param_groups) + 1:
208 self.optim.add_param_group(param_groups[-1])
209
210 def _sync_param_groups(self) -> None:
211 """Sync learning rate and other optimizer attributes (needed to support schedulers)."""
212 for global_group, local_group in zip(self.param_groups, self.optim.param_groups):
213 for k in local_group.keys():
214 if k != "params":
215 # Params have been sharded and should not be synced here
216 local_group[k] = global_group[k]
217
218 def _collect_sharded_states(self) -> List[Dict[str, Any]]:
219 """
220 Collect all the state shards, in CPU memory.
221 """
222 empty_buffer = torch.tensor([0], dtype=torch.uint8, device=self._device)
223 all_states: List[Dict[str, Any]] = []
224
225 for rank in range(dist.get_world_size(group=self.group)):
226 if rank == self.rank:
227 logging.debug("Saving self state")
228 all_states.append(
229 recursive_copy_to_device(self.local_state_dict(), non_blocking=True, device=torch.device("cpu"))
230 )
231
232 # Sync with other replicas
233 broadcast_object(empty_buffer, src_rank=rank, group=self.group, dist_device=self._device)
234 else:
235 # Fetch the optim state from the other replicas
236 logging.debug("Receiving state from rank %s ", rank)
237 replica_state = broadcast_object(
238 empty_buffer, src_rank=rank, group=self.group, dist_device=self._device
239 )
240
241 all_states.append(
242 recursive_copy_to_device(replica_state, non_blocking=True, device=torch.device("cpu"))
243 )
244
245 logging.debug("State from rank %s received", rank)
246
247 return all_states
248
249 def _broadcast_state_dict(self) -> None:
250 """
251 Broadcast this rank's state shard, discard others
252 """
253 empty_buffer = torch.tensor([0], dtype=torch.uint8, device=self._device)
254
255 for rank in range(dist.get_world_size(group=self.group)):
256 if rank == self.rank:
257 # Send the state to the reference replica
258 logging.debug(
259 "Sending the sharded optimizer state to the reference replica from rank %s", rank,
260 )
261 broadcast_object(self.local_state_dict(), src_rank=rank, group=self.group, dist_device=self._device)
262 else:
263 # Discard this tensor/rank, broadcast necessary for syncing
264 logging.debug("Discarding broadcast from rank %s", rank)
265 broadcast_object(empty_buffer, src_rank=rank, group=self.group, dist_device=self._device)
266
```
--- END FILES ---
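An aside on the `step()` method shown above (lines 102-117 of the listing): it forwards `closure` to the wrapped optimizer unconditionally, which is what breaks for optimizers whose `step()` does not accept a closure at all, the LARC case this row's issue describes. The stand-alone sketch below is not part of the original record; it uses made-up class names (`ClosureFreeOptim`, `ShardWrapper`) rather than the real fairscale or apex APIs, and only illustrates the failure mode and the conditional forwarding fix.

```python
# Hypothetical stand-ins, not the real fairscale/apex classes; they only show
# why forwarding `closure` unconditionally can raise a TypeError.
from typing import Any, Callable, Optional


class ClosureFreeOptim:
    """Toy optimizer whose step() takes no arguments, like some optimizer wrappers."""

    def step(self) -> None:
        print("parameters updated")


class ShardWrapper:
    """Toy wrapping optimizer that may or may not receive a closure."""

    def __init__(self, optim: ClosureFreeOptim) -> None:
        self.optim = optim

    def step(self, closure: Optional[Callable[[], float]] = None, **kwargs: Any) -> Optional[float]:
        # Calling self.optim.step(closure=None) here would raise
        # "TypeError: step() got an unexpected keyword argument 'closure'"
        # because the wrapped step() has no such parameter, even when closure is None.
        if closure is not None:
            # Reached only when a caller actually supplies a closure; a closure-free
            # optimizer would still reject it, which mirrors the real limitation.
            return self.optim.step(closure=closure, **kwargs)  # type: ignore[call-arg]
        return self.optim.step(**kwargs)


# Works: nothing is forwarded that the inner step() cannot accept.
ShardWrapper(ClosureFreeOptim()).step()
```

The golden diff that follows in this row applies the same `if closure is not None` guard inside `OSS.step`.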
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/fairscale/optim/oss.py b/fairscale/optim/oss.py
--- a/fairscale/optim/oss.py
+++ b/fairscale/optim/oss.py
@@ -103,8 +103,11 @@
# Sync oss param_groups attributes in case they've been updated by a scheduler.
self._sync_param_groups()
- # Run the optimizer step on this shard only
- loss = self.optim.step(closure=closure, **kwargs) # type: ignore
+ # Run the optimizer step on this shard only:
+ if closure is not None:
+ loss = self.optim.step(closure=closure, **kwargs) # type: ignore
+ else:
+ loss = self.optim.step(**kwargs)
# Sync all the states. Broadcast requests are issued async, we check completeness before moving on
requests = []
| {"golden_diff": "diff --git a/fairscale/optim/oss.py b/fairscale/optim/oss.py\n--- a/fairscale/optim/oss.py\n+++ b/fairscale/optim/oss.py\n@@ -103,8 +103,11 @@\n # Sync oss param_groups attributes in case they've been updated by a scheduler.\n self._sync_param_groups()\n \n- # Run the optimizer step on this shard only\n- loss = self.optim.step(closure=closure, **kwargs) # type: ignore\n+ # Run the optimizer step on this shard only:\n+ if closure is not None:\n+ loss = self.optim.step(closure=closure, **kwargs) # type: ignore\n+ else:\n+ loss = self.optim.step(**kwargs)\n \n # Sync all the states. Broadcast requests are issued async, we check completeness before moving on\n requests = []\n", "issue": "[feat] OSS: Support nvidia's LARC\n## \ud83d\ude80 Feature\r\nMake it possible to support LARC with OSS\r\n\r\n## Motivation\r\nLARC is a must have for large batch jobs, right now OSS will break on LARC because of the closure() being passed\r\n\r\n## Pitch\r\nShould be doable to gracefully handle optimizers with do not support closures in step()\r\n\r\n## Alternatives\r\nNot supporting LARC, reduces a lot of OSS interest\r\n\r\n## Additional context\r\n\r\ncc @mannatsingh @prigoyal @msbaines \n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport copy\nfrom itertools import chain\nimport logging\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Type\n\nimport torch\nimport torch.distributed as dist\nfrom torch.optim import SGD, Optimizer\n\nfrom .utils import broadcast_object, recursive_copy_to_device\n\nif TYPE_CHECKING: # pragma: no cover\n from torch.optim.optimizer import _params_t\nelse:\n _params_t = Any\n\n\nclass OSS(Optimizer):\n \"\"\"Wraps an arbitrary :class:`optim.Optimizer <torch.optim.Optimizer>`\n optimizer and shards its state as described by ZeRO_.\n ::\n opt = OSS(params, optim=torch.optim.Adam, lr=0.01)\n\n .. _ZeRO: https://arxiv.org/abs/1910.02054\n\n We use a greedy algorithm to pack a number of parameters\n at each rank. 
Each parameter belongs to a single rank and\n is not divided among rank.\n\n After each rank completed their parameter update, they broadcast\n the new version of the parameters to all other ranks to synchronize\n the parameters for next round forward/backward computation.\n\n Args:\n params (list of tensors):\n parameters to be optimized\n Keyword Args:\n optim (torch.nn.Optimizer):\n optimizer to shard (default: SGD)\n group (group):\n torch.distributed group (default: group.WORLD)\n \"\"\"\n\n optim: Optimizer\n in_super_constructor: bool\n\n def __init__(self, params: _params_t, optim: Type[Optimizer] = SGD, group: Any = dist.group.WORLD, **defaults: Any):\n # Hold all the model params in the root .param_groups\n self.in_super_constructor = True\n super().__init__(params, defaults)\n self.in_super_constructor = False\n\n # Build the wrapped optimizer, responsible for a shard of the params\n self.group = group\n self.rank = dist.get_rank(group)\n split_param_groups = self.partition_parameters()\n self.optim = optim(split_param_groups[self.rank], **defaults)\n\n # Optional consolidated optimizer state\n self._all_states: List[Dict[str, Any]] = []\n\n # Current device is set by the parameters allocated to this rank\n self._device = split_param_groups[self.rank][0][\"params\"][0].device\n\n # Sync local and global param_groups keys\n for global_group, local_group in zip(self.param_groups, self.optim.param_groups):\n for k, v in local_group.items():\n if k != \"params\":\n global_group[k] = v\n\n def partition_parameters(self) -> List[List[dict]]:\n \"\"\"Partitions parameters across distributed ranks.\n\n Returns a list of param_groups (which is a list of dict) where each\n element of the list contains the param_groups for a rank. Element 0\n corresponds to rank 0, etc. We need all the ranks for the broadcast\n inside step().\n \"\"\"\n world_size = dist.get_world_size(self.group)\n param_groups: List[List] = [list() for _ in range(world_size)]\n sizes = [0] * world_size\n for param_group in self.param_groups:\n param_lists: List[List] = [list() for _ in range(world_size)]\n for param in param_group[\"params\"]:\n # Add this param to rank with smallest size.\n rank = sizes.index(min(sizes))\n param_lists[rank].append(param)\n sizes[rank] += param.numel()\n for rank, params in enumerate(param_lists):\n param_group_rank = copy.copy(param_group)\n param_group_rank[\"params\"] = params\n param_groups[rank].append(param_group_rank)\n return param_groups\n\n # NOTE(msb) We add a kwargs in order to support Optimizer sub-classes that support extra kwargs.\n # For example, the apex library contains fused optimizers with a step that supports extra kwargs.\n def step(self, closure: Optional[Callable[[], float]] = None, **kwargs: Any) -> Optional[float]:\n # Sync oss param_groups attributes in case they've been updated by a scheduler.\n self._sync_param_groups()\n\n # Run the optimizer step on this shard only\n loss = self.optim.step(closure=closure, **kwargs) # type: ignore\n\n # Sync all the states. Broadcast requests are issued async, we check completeness before moving on\n requests = []\n for rank, param_groups in enumerate(self.partition_parameters()):\n for param_group in param_groups:\n for param in param_group[\"params\"]:\n requests.append(dist.broadcast(tensor=param, src=rank, group=self.group, async_op=True))\n\n _ = list(map(lambda x: x.wait(), requests))\n return loss\n\n def local_state_dict(self) -> dict:\n \"\"\" Gets this rank's state_dict. 
\"\"\"\n return self.optim.state_dict()\n\n def consolidate_state_dict(self, recipient_rank: int = 0) -> None:\n \"\"\" Update the consolidated state_dict list, one per rank.\n\n This needs to be called on all replicas \"\"\"\n\n # Sync lr and other attributes in case its been updated\n self._sync_param_groups()\n\n if self.rank == recipient_rank:\n # Pull the sharded state from all the other replicas\n # Store all the states in order, rank by rank\n logging.debug(\"Pulling the sharded optimizer state from all replicas\")\n self._all_states = self._collect_sharded_states()\n else:\n # Acknowledge broadcasts, and send this rank's shard when needed\n self._broadcast_state_dict()\n\n def state_dict(self) -> Dict[str, Any]:\n \"\"\"\n Return the last known global optimizer state, which consist of a list of the shards.\n\n NOTE: This is limited to the replica which was responsible for the consolidation.\n The state may also not be up to date, depending on when `consolidate_state_dict` was last called.\n \"\"\"\n\n assert (\n len(self._all_states) > 0\n ), \"The optimizer state is not materialized, please call consolidate_state_dict on every replica beforehand\"\n\n # Flatten the param_groups, save the partition which logs the rank <> shard correspondence\n partition: List[Tuple[int, int]] = []\n param_groups: List[Dict[Any, Any]] = []\n\n start = 0\n for i, s in enumerate(self._all_states):\n param_groups.extend(s[\"param_groups\"])\n end = start + len(s[\"param_groups\"])\n partition.append((start, end))\n start = end\n\n return {\n \"state\": [s[\"state\"] for s in self._all_states],\n \"param_groups\": param_groups,\n \"partition\": partition,\n }\n\n def load_local_state_dict(self, state_dict: dict) -> None:\n \"\"\" Loads this rank's state_dict. \"\"\"\n\n self.optim.load_state_dict(state_dict)\n\n # Workaround PyTorch bug that casts state (https://github.com/pytorch/pytorch/issues/43706)\n # Copied from https://github.com/pytorch/fairseq/blob/v0.9.0/fairseq/optim/fp16_optimizer.py#L251-L268\n groups = self.optim.param_groups\n saved_groups = state_dict[\"param_groups\"]\n id_map = {\n old_id: p\n for old_id, p in zip(chain(*(g[\"params\"] for g in saved_groups)), chain(*(g[\"params\"] for g in groups)))\n }\n for k, v in state_dict[\"state\"].items():\n if k in id_map:\n param = id_map[k]\n self.optim.state[param] = recursive_copy_to_device(v, non_blocking=True, device=param.device)\n\n # Restore the global param_groups (the params themselves are already correct)\n for global_group, local_group in zip(self.param_groups, groups):\n for k, v in local_group.items():\n if k != \"params\":\n global_group[k] = v\n\n def load_state_dict(self, state_dict: Dict[str, Any]) -> None:\n \"\"\" Restore the global parameter groups as well as the shard \"\"\"\n\n # Get this optimizer's param_groups shard\n param_groups = state_dict[\"param_groups\"][\n state_dict[\"partition\"][self.rank][0] : state_dict[\"partition\"][self.rank][1]\n ]\n # Dispatch this rank's state dictionary to the wrapped shard optimizer\n self.load_local_state_dict({\"state\": state_dict[\"state\"][self.rank], \"param_groups\": param_groups})\n\n def add_param_group(self, param_group: dict) -> None:\n super().add_param_group(param_group)\n if not self.in_super_constructor:\n param_groups = self.partition_parameters()[self.rank]\n if len(param_groups) == len(self.optim.param_groups) + 1:\n self.optim.add_param_group(param_groups[-1])\n\n def _sync_param_groups(self) -> None:\n \"\"\"Sync learning rate and other optimizer attributes 
(needed to support schedulers).\"\"\"\n for global_group, local_group in zip(self.param_groups, self.optim.param_groups):\n for k in local_group.keys():\n if k != \"params\":\n # Params have been sharded and should not be synced here\n local_group[k] = global_group[k]\n\n def _collect_sharded_states(self) -> List[Dict[str, Any]]:\n \"\"\"\n Collect all the state shards, in CPU memory.\n \"\"\"\n empty_buffer = torch.tensor([0], dtype=torch.uint8, device=self._device)\n all_states: List[Dict[str, Any]] = []\n\n for rank in range(dist.get_world_size(group=self.group)):\n if rank == self.rank:\n logging.debug(\"Saving self state\")\n all_states.append(\n recursive_copy_to_device(self.local_state_dict(), non_blocking=True, device=torch.device(\"cpu\"))\n )\n\n # Sync with other replicas\n broadcast_object(empty_buffer, src_rank=rank, group=self.group, dist_device=self._device)\n else:\n # Fetch the optim state from the other replicas\n logging.debug(\"Receiving state from rank %s \", rank)\n replica_state = broadcast_object(\n empty_buffer, src_rank=rank, group=self.group, dist_device=self._device\n )\n\n all_states.append(\n recursive_copy_to_device(replica_state, non_blocking=True, device=torch.device(\"cpu\"))\n )\n\n logging.debug(\"State from rank %s received\", rank)\n\n return all_states\n\n def _broadcast_state_dict(self) -> None:\n \"\"\"\n Broadcast this rank's state shard, discard others\n \"\"\"\n empty_buffer = torch.tensor([0], dtype=torch.uint8, device=self._device)\n\n for rank in range(dist.get_world_size(group=self.group)):\n if rank == self.rank:\n # Send the state to the reference replica\n logging.debug(\n \"Sending the sharded optimizer state to the reference replica from rank %s\", rank,\n )\n broadcast_object(self.local_state_dict(), src_rank=rank, group=self.group, dist_device=self._device)\n else:\n # Discard this tensor/rank, broadcast necessary for syncing\n logging.debug(\"Discarding broadcast from rank %s\", rank)\n broadcast_object(empty_buffer, src_rank=rank, group=self.group, dist_device=self._device)\n", "path": "fairscale/optim/oss.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport copy\nfrom itertools import chain\nimport logging\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Type\n\nimport torch\nimport torch.distributed as dist\nfrom torch.optim import SGD, Optimizer\n\nfrom .utils import broadcast_object, recursive_copy_to_device\n\nif TYPE_CHECKING: # pragma: no cover\n from torch.optim.optimizer import _params_t\nelse:\n _params_t = Any\n\n\nclass OSS(Optimizer):\n \"\"\"Wraps an arbitrary :class:`optim.Optimizer <torch.optim.Optimizer>`\n optimizer and shards its state as described by ZeRO_.\n ::\n opt = OSS(params, optim=torch.optim.Adam, lr=0.01)\n\n .. _ZeRO: https://arxiv.org/abs/1910.02054\n\n We use a greedy algorithm to pack a number of parameters\n at each rank. 
Each parameter belongs to a single rank and\n is not divided among rank.\n\n After each rank completed their parameter update, they broadcast\n the new version of the parameters to all other ranks to synchronize\n the parameters for next round forward/backward computation.\n\n Args:\n params (list of tensors):\n parameters to be optimized\n Keyword Args:\n optim (torch.nn.Optimizer):\n optimizer to shard (default: SGD)\n group (group):\n torch.distributed group (default: group.WORLD)\n \"\"\"\n\n optim: Optimizer\n in_super_constructor: bool\n\n def __init__(self, params: _params_t, optim: Type[Optimizer] = SGD, group: Any = dist.group.WORLD, **defaults: Any):\n # Hold all the model params in the root .param_groups\n self.in_super_constructor = True\n super().__init__(params, defaults)\n self.in_super_constructor = False\n\n # Build the wrapped optimizer, responsible for a shard of the params\n self.group = group\n self.rank = dist.get_rank(group)\n split_param_groups = self.partition_parameters()\n self.optim = optim(split_param_groups[self.rank], **defaults)\n\n # Optional consolidated optimizer state\n self._all_states: List[Dict[str, Any]] = []\n\n # Current device is set by the parameters allocated to this rank\n self._device = split_param_groups[self.rank][0][\"params\"][0].device\n\n # Sync local and global param_groups keys\n for global_group, local_group in zip(self.param_groups, self.optim.param_groups):\n for k, v in local_group.items():\n if k != \"params\":\n global_group[k] = v\n\n def partition_parameters(self) -> List[List[dict]]:\n \"\"\"Partitions parameters across distributed ranks.\n\n Returns a list of param_groups (which is a list of dict) where each\n element of the list contains the param_groups for a rank. Element 0\n corresponds to rank 0, etc. We need all the ranks for the broadcast\n inside step().\n \"\"\"\n world_size = dist.get_world_size(self.group)\n param_groups: List[List] = [list() for _ in range(world_size)]\n sizes = [0] * world_size\n for param_group in self.param_groups:\n param_lists: List[List] = [list() for _ in range(world_size)]\n for param in param_group[\"params\"]:\n # Add this param to rank with smallest size.\n rank = sizes.index(min(sizes))\n param_lists[rank].append(param)\n sizes[rank] += param.numel()\n for rank, params in enumerate(param_lists):\n param_group_rank = copy.copy(param_group)\n param_group_rank[\"params\"] = params\n param_groups[rank].append(param_group_rank)\n return param_groups\n\n # NOTE(msb) We add a kwargs in order to support Optimizer sub-classes that support extra kwargs.\n # For example, the apex library contains fused optimizers with a step that supports extra kwargs.\n def step(self, closure: Optional[Callable[[], float]] = None, **kwargs: Any) -> Optional[float]:\n # Sync oss param_groups attributes in case they've been updated by a scheduler.\n self._sync_param_groups()\n\n # Run the optimizer step on this shard only:\n if closure is not None:\n loss = self.optim.step(closure=closure, **kwargs) # type: ignore\n else:\n loss = self.optim.step(**kwargs)\n\n # Sync all the states. 
Broadcast requests are issued async, we check completeness before moving on\n requests = []\n for rank, param_groups in enumerate(self.partition_parameters()):\n for param_group in param_groups:\n for param in param_group[\"params\"]:\n requests.append(dist.broadcast(tensor=param, src=rank, group=self.group, async_op=True))\n\n _ = list(map(lambda x: x.wait(), requests))\n return loss\n\n def local_state_dict(self) -> dict:\n \"\"\" Gets this rank's state_dict. \"\"\"\n return self.optim.state_dict()\n\n def consolidate_state_dict(self, recipient_rank: int = 0) -> None:\n \"\"\" Update the consolidated state_dict list, one per rank.\n\n This needs to be called on all replicas \"\"\"\n\n # Sync lr and other attributes in case its been updated\n self._sync_param_groups()\n\n if self.rank == recipient_rank:\n # Pull the sharded state from all the other replicas\n # Store all the states in order, rank by rank\n logging.debug(\"Pulling the sharded optimizer state from all replicas\")\n self._all_states = self._collect_sharded_states()\n else:\n # Acknowledge broadcasts, and send this rank's shard when needed\n self._broadcast_state_dict()\n\n def state_dict(self) -> Dict[str, Any]:\n \"\"\"\n Return the last known global optimizer state, which consist of a list of the shards.\n\n NOTE: This is limited to the replica which was responsible for the consolidation.\n The state may also not be up to date, depending on when `consolidate_state_dict` was last called.\n \"\"\"\n\n assert (\n len(self._all_states) > 0\n ), \"The optimizer state is not materialized, please call consolidate_state_dict on every replica beforehand\"\n\n # Flatten the param_groups, save the partition which logs the rank <> shard correspondence\n partition: List[Tuple[int, int]] = []\n param_groups: List[Dict[Any, Any]] = []\n\n start = 0\n for i, s in enumerate(self._all_states):\n param_groups.extend(s[\"param_groups\"])\n end = start + len(s[\"param_groups\"])\n partition.append((start, end))\n start = end\n\n return {\n \"state\": [s[\"state\"] for s in self._all_states],\n \"param_groups\": param_groups,\n \"partition\": partition,\n }\n\n def load_local_state_dict(self, state_dict: dict) -> None:\n \"\"\" Loads this rank's state_dict. 
\"\"\"\n\n self.optim.load_state_dict(state_dict)\n\n # Workaround PyTorch bug that casts state (https://github.com/pytorch/pytorch/issues/43706)\n # Copied from https://github.com/pytorch/fairseq/blob/v0.9.0/fairseq/optim/fp16_optimizer.py#L251-L268\n groups = self.optim.param_groups\n saved_groups = state_dict[\"param_groups\"]\n id_map = {\n old_id: p\n for old_id, p in zip(chain(*(g[\"params\"] for g in saved_groups)), chain(*(g[\"params\"] for g in groups)))\n }\n for k, v in state_dict[\"state\"].items():\n if k in id_map:\n param = id_map[k]\n self.optim.state[param] = recursive_copy_to_device(v, non_blocking=True, device=param.device)\n\n # Restore the global param_groups (the params themselves are already correct)\n for global_group, local_group in zip(self.param_groups, groups):\n for k, v in local_group.items():\n if k != \"params\":\n global_group[k] = v\n\n def load_state_dict(self, state_dict: Dict[str, Any]) -> None:\n \"\"\" Restore the global parameter groups as well as the shard \"\"\"\n\n # Get this optimizer's param_groups shard\n param_groups = state_dict[\"param_groups\"][\n state_dict[\"partition\"][self.rank][0] : state_dict[\"partition\"][self.rank][1]\n ]\n # Dispatch this rank's state dictionary to the wrapped shard optimizer\n self.load_local_state_dict({\"state\": state_dict[\"state\"][self.rank], \"param_groups\": param_groups})\n\n def add_param_group(self, param_group: dict) -> None:\n super().add_param_group(param_group)\n if not self.in_super_constructor:\n param_groups = self.partition_parameters()[self.rank]\n if len(param_groups) == len(self.optim.param_groups) + 1:\n self.optim.add_param_group(param_groups[-1])\n\n def _sync_param_groups(self) -> None:\n \"\"\"Sync learning rate and other optimizer attributes (needed to support schedulers).\"\"\"\n for global_group, local_group in zip(self.param_groups, self.optim.param_groups):\n for k in local_group.keys():\n if k != \"params\":\n # Params have been sharded and should not be synced here\n local_group[k] = global_group[k]\n\n def _collect_sharded_states(self) -> List[Dict[str, Any]]:\n \"\"\"\n Collect all the state shards, in CPU memory.\n \"\"\"\n empty_buffer = torch.tensor([0], dtype=torch.uint8, device=self._device)\n all_states: List[Dict[str, Any]] = []\n\n for rank in range(dist.get_world_size(group=self.group)):\n if rank == self.rank:\n logging.debug(\"Saving self state\")\n all_states.append(\n recursive_copy_to_device(self.local_state_dict(), non_blocking=True, device=torch.device(\"cpu\"))\n )\n\n # Sync with other replicas\n broadcast_object(empty_buffer, src_rank=rank, group=self.group, dist_device=self._device)\n else:\n # Fetch the optim state from the other replicas\n logging.debug(\"Receiving state from rank %s \", rank)\n replica_state = broadcast_object(\n empty_buffer, src_rank=rank, group=self.group, dist_device=self._device\n )\n\n all_states.append(\n recursive_copy_to_device(replica_state, non_blocking=True, device=torch.device(\"cpu\"))\n )\n\n logging.debug(\"State from rank %s received\", rank)\n\n return all_states\n\n def _broadcast_state_dict(self) -> None:\n \"\"\"\n Broadcast this rank's state shard, discard others\n \"\"\"\n empty_buffer = torch.tensor([0], dtype=torch.uint8, device=self._device)\n\n for rank in range(dist.get_world_size(group=self.group)):\n if rank == self.rank:\n # Send the state to the reference replica\n logging.debug(\n \"Sending the sharded optimizer state to the reference replica from rank %s\", rank,\n )\n 
broadcast_object(self.local_state_dict(), src_rank=rank, group=self.group, dist_device=self._device)\n else:\n # Discard this tensor/rank, broadcast necessary for syncing\n logging.debug(\"Discarding broadcast from rank %s\", rank)\n broadcast_object(empty_buffer, src_rank=rank, group=self.group, dist_device=self._device)\n", "path": "fairscale/optim/oss.py"}]} | 3,548 | 197 |
gh_patches_debug_34709 | rasdani/github-patches | git_diff | svthalia__concrexit-3422 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove ObtainThaliaAuthToken view
### Describe the change
Remove https://github.com/svthalia/concrexit/blob/4b664220b406361245e153433288ffbcf9d6f4a9/website/members/views.py#L35
### Motivation
We don't use it
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/thaliawebsite/api/v1/urls.py`
Content:
```
1 from django.conf import settings
2 from django.urls import include, path
3
4 from rest_framework.schemas import get_schema_view
5
6 from members.views import ObtainThaliaAuthToken
7 from thaliawebsite.api.openapi import OAuthSchemaGenerator
8
9 app_name = "thaliawebsite"
10
11 urlpatterns = [
12 path("token-auth/", ObtainThaliaAuthToken.as_view()),
13 path("", include("activemembers.api.v1.urls")),
14 path("", include("announcements.api.v1.urls")),
15 path("", include("events.api.v1.urls")),
16 path("", include("members.api.v1.urls")),
17 path("", include("partners.api.v1.urls")),
18 path("", include("pizzas.api.v1.urls")),
19 path("", include("photos.api.v1.urls")),
20 path("", include("pushnotifications.api.v1.urls")),
21 path("", include("payments.api.v1.urls")),
22 path(
23 "schema",
24 get_schema_view(
25 title="API v1",
26 version=settings.SOURCE_COMMIT,
27 url="/api/v1/",
28 urlconf="thaliawebsite.api.v1.urls",
29 generator_class=OAuthSchemaGenerator,
30 ),
31 name="schema",
32 ),
33 ]
34
```
Path: `website/members/views.py`
Content:
```
1 """Views provided by the members package."""
2 import json
3 from datetime import date, datetime
4
5 from django.contrib.auth.decorators import login_required
6 from django.contrib.messages.views import SuccessMessageMixin
7 from django.db.models import Q, QuerySet
8 from django.http import Http404, HttpResponse
9 from django.shortcuts import get_object_or_404
10 from django.template.response import TemplateResponse
11 from django.urls import reverse_lazy
12 from django.utils.decorators import method_decorator
13 from django.utils.translation import gettext_lazy as _
14 from django.views.generic import CreateView, DetailView, UpdateView
15 from django.views.generic.base import TemplateResponseMixin, TemplateView, View
16
17 from rest_framework.authtoken.models import Token
18 from rest_framework.authtoken.views import ObtainAuthToken
19 from rest_framework.response import Response
20
21 import activemembers.services as activemembers_services
22 import events.services as event_services
23 import pizzas.services
24 from members import emails, services
25 from members.decorators import membership_required
26 from members.models import EmailChange, Member, Membership, Profile
27 from thaliawebsite.views import PagedView
28 from utils.media.services import fetch_thumbnails_db
29 from utils.snippets import datetime_to_lectureyear
30
31 from . import models
32 from .forms import ProfileForm
33 from .services import member_achievements, member_societies
34
35
36 class ObtainThaliaAuthToken(ObtainAuthToken):
37 """Custom override of the AuthToken view to force lowercase the username."""
38
39 def post(self, request, *args, **kwargs) -> HttpResponse:
40 serializer = self.serializer_class(
41 data={
42 "username": request.data.get("username").lower()
43 if "username" in request.data
44 else None,
45 "password": request.data.get("password"),
46 },
47 context={"request": request},
48 )
49
50 if not serializer.is_valid():
51 return Response({"error": "Unauthorized"}, status=401)
52
53 user = serializer.validated_data["user"]
54 token, _ = Token.objects.get_or_create(user=user)
55 return Response({"token": token.key})
56
57
58 @method_decorator(login_required, "dispatch")
59 @method_decorator(membership_required, "dispatch")
60 class MembersIndex(PagedView):
61 """View that renders the members overview."""
62
63 model = Member
64 paginate_by = 28
65 template_name = "members/index.html"
66 context_object_name = "members"
67 keywords = None
68 query_filter = ""
69 year_range = []
70
71 def setup(self, request, *args, **kwargs) -> None:
72 super().setup(request, *args, **kwargs)
73 current_lectureyear = datetime_to_lectureyear(date.today())
74 self.year_range = list(
75 reversed(range(current_lectureyear - 5, current_lectureyear + 1))
76 )
77 self.keywords = request.GET.get("keywords", "").split() or None
78 self.query_filter = kwargs.get("filter", None)
79
80 def get_queryset(self) -> QuerySet:
81 memberships_query = Q(until__gt=datetime.now()) | Q(until=None)
82 members_query = ~Q(id=None)
83
84 if self.query_filter and self.query_filter.isdigit():
85 members_query &= Q(profile__starting_year=int(self.query_filter))
86 memberships_query &= Q(type=Membership.MEMBER)
87 elif self.query_filter == "older":
88 members_query &= Q(profile__starting_year__lt=self.year_range[-1])
89 memberships_query &= Q(type=Membership.MEMBER)
90 elif self.query_filter == "former":
91 # Filter out all current active memberships
92 memberships_query &= Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)
93 memberships = Membership.objects.filter(memberships_query)
94 members_query &= ~Q(pk__in=memberships.values("user__pk"))
95 # Members_query contains users that are not currently (honorary)member
96 elif self.query_filter == "benefactors":
97 memberships_query &= Q(type=Membership.BENEFACTOR)
98 elif self.query_filter == "honorary":
99 memberships_query = Q(until__gt=datetime.now().date()) | Q(until=None)
100 memberships_query &= Q(type=Membership.HONORARY)
101
102 if self.keywords:
103 for key in self.keywords:
104 # Works because relevant options all have `nick` in their key
105 members_query &= (
106 (
107 Q(profile__nickname__icontains=key)
108 & Q(profile__display_name_preference__contains="nick")
109 )
110 | Q(first_name__icontains=key)
111 | Q(last_name__icontains=key)
112 | Q(username__icontains=key)
113 )
114
115 if self.query_filter == "former":
116 memberships_query = Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)
117 memberships = Membership.objects.filter(memberships_query)
118 all_memberships = Membership.objects.all()
119 # Only keep members that were once members, or are legacy users
120 # that do not have any memberships at all
121 members_query &= Q(pk__in=memberships.values("user__pk")) | ~Q(
122 pk__in=all_memberships.values("user__pk")
123 )
124 else:
125 memberships = Membership.objects.filter(memberships_query)
126 members_query &= Q(pk__in=memberships.values("user__pk"))
127 members = (
128 Member.objects.filter(members_query)
129 .order_by("first_name")
130 .select_related("profile")
131 )
132 return members
133
134 def get_context_data(self, **kwargs) -> dict:
135 context = super().get_context_data(**kwargs)
136
137 context.update(
138 {
139 "filter": self.query_filter,
140 "year_range": self.year_range,
141 "keywords": self.keywords,
142 }
143 )
144
145 fetch_thumbnails_db(
146 [x.profile.photo for x in context["object_list"] if x.profile.photo]
147 )
148
149 return context
150
151
152 @method_decorator(login_required, "dispatch")
153 class ProfileDetailView(DetailView):
154 """View that renders a member's profile."""
155
156 context_object_name = "member"
157 model = Member
158 template_name = "members/user/profile.html"
159
160 def setup(self, request, *args, **kwargs) -> None:
161 if "pk" not in kwargs and request.member:
162 kwargs["pk"] = request.member.pk
163 super().setup(request, *args, **kwargs)
164
165 def get_context_data(self, **kwargs) -> dict:
166 context = super().get_context_data(**kwargs)
167 member = context["member"]
168
169 achievements = member_achievements(member)
170 societies = member_societies(member)
171
172 membership = member.current_membership
173 membership_type = _("Unknown membership history")
174 if membership:
175 membership_type = membership.get_type_display()
176 elif member.has_been_honorary_member():
177 membership_type = _("Former honorary member")
178 elif member.has_been_member():
179 membership_type = _("Former member")
180 elif member.latest_membership:
181 membership_type = _("Former benefactor")
182
183 context.update(
184 {
185 "achievements": achievements,
186 "societies": societies,
187 "membership_type": membership_type,
188 }
189 )
190
191 return context
192
193
194 @method_decorator(login_required, "dispatch")
195 class UserProfileUpdateView(SuccessMessageMixin, UpdateView):
196 """View that allows a user to update their profile."""
197
198 template_name = "members/user/edit_profile.html"
199 model = Profile
200 form_class = ProfileForm
201 success_url = reverse_lazy("members:edit-profile")
202 success_message = _("Your profile has been updated successfully.")
203
204 def get_object(self, queryset=None) -> Profile:
205 return get_object_or_404(models.Profile, user=self.request.user)
206
207
208 @method_decorator(login_required, "dispatch")
209 class StatisticsView(TemplateView):
210 """View that renders the statistics page."""
211
212 template_name = "members/statistics.html"
213
214 def get_context_data(self, **kwargs) -> dict:
215 context = super().get_context_data(**kwargs)
216 context.update(
217 {
218 "total_members": models.Member.current_members.count(),
219 "cohort_sizes": json.dumps(services.gen_stats_year()),
220 "member_type_distribution": json.dumps(
221 services.gen_stats_member_type()
222 ),
223 "committee_sizes": json.dumps(
224 activemembers_services.generate_statistics()
225 ),
226 "event_categories": json.dumps(
227 event_services.generate_category_statistics()
228 ),
229 "total_pizza_orders": json.dumps(
230 pizzas.services.gen_stats_pizza_orders()
231 ),
232 "active_members": json.dumps(services.gen_stats_active_members()),
233 }
234 )
235
236 return context
237
238
239 @method_decorator(login_required, name="dispatch")
240 class EmailChangeFormView(CreateView):
241 """View that renders the email change form."""
242
243 model = EmailChange
244 fields = ["email", "member"]
245 template_name = "members/user/email_change.html"
246
247 def get_initial(self) -> dict:
248 initial = super().get_initial()
249 initial["email"] = self.request.member.email
250 return initial
251
252 def post(self, request, *args, **kwargs) -> HttpResponse:
253 request.POST = request.POST.dict()
254 request.POST["member"] = request.member.pk
255 return super().post(request, *args, **kwargs)
256
257 def form_valid(self, form) -> HttpResponse:
258 change_request = form.save()
259 emails.send_email_change_confirmation_messages(change_request)
260 return TemplateResponse(
261 request=self.request, template="members/user/email_change_requested.html"
262 )
263
264
265 @method_decorator(login_required, name="dispatch")
266 class EmailChangeConfirmView(View, TemplateResponseMixin):
267 """View that renders an HTML template and confirms the old email address."""
268
269 template_name = "members/user/email_change_confirmed.html"
270
271 def get(self, request, *args, **kwargs) -> HttpResponse:
272 if not EmailChange.objects.filter(confirm_key=kwargs["key"]).exists():
273 raise Http404
274
275 change_request = EmailChange.objects.get(confirm_key=kwargs["key"])
276
277 services.confirm_email_change(change_request)
278
279 return self.render_to_response({})
280
281
282 @method_decorator(login_required, name="dispatch")
283 class EmailChangeVerifyView(View, TemplateResponseMixin):
284 """View that renders an HTML template and verifies the new email address."""
285
286 template_name = "members/user/email_change_verified.html"
287
288 def get(self, request, *args, **kwargs) -> HttpResponse:
289 if not EmailChange.objects.filter(verify_key=kwargs["key"]).exists():
290 raise Http404
291
292 change_request = EmailChange.objects.get(verify_key=kwargs["key"])
293
294 services.verify_email_change(change_request)
295
296 return self.render_to_response({})
297
```
--- END FILES ---
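An aside on the two files above: `ObtainThaliaAuthToken` is defined in `website/members/views.py` but wired up in `website/thaliawebsite/api/v1/urls.py`, so removing the view also means dropping its import and the `token-auth/` route. The sketch below is not part of the original record; it simply mirrors what the v1 URLConf would contain after the patch in this row, and it assumes the surrounding concrexit project (Django settings and the included per-app URLConfs), so it is not runnable on its own.

```python
# Sketch of website/thaliawebsite/api/v1/urls.py after the removal; it matches
# the golden diff for this row and keeps every other include unchanged.
from django.conf import settings
from django.urls import include, path

from rest_framework.schemas import get_schema_view

from thaliawebsite.api.openapi import OAuthSchemaGenerator

app_name = "thaliawebsite"

urlpatterns = [
    path("", include("activemembers.api.v1.urls")),
    path("", include("announcements.api.v1.urls")),
    path("", include("events.api.v1.urls")),
    path("", include("members.api.v1.urls")),
    path("", include("partners.api.v1.urls")),
    path("", include("pizzas.api.v1.urls")),
    path("", include("photos.api.v1.urls")),
    path("", include("pushnotifications.api.v1.urls")),
    path("", include("payments.api.v1.urls")),
    path(
        "schema",
        get_schema_view(
            title="API v1",
            version=settings.SOURCE_COMMIT,
            url="/api/v1/",
            urlconf="thaliawebsite.api.v1.urls",
            generator_class=OAuthSchemaGenerator,
        ),
        name="schema",
    ),
]
```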
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/members/views.py b/website/members/views.py
--- a/website/members/views.py
+++ b/website/members/views.py
@@ -14,10 +14,6 @@
from django.views.generic import CreateView, DetailView, UpdateView
from django.views.generic.base import TemplateResponseMixin, TemplateView, View
-from rest_framework.authtoken.models import Token
-from rest_framework.authtoken.views import ObtainAuthToken
-from rest_framework.response import Response
-
import activemembers.services as activemembers_services
import events.services as event_services
import pizzas.services
@@ -33,28 +29,6 @@
from .services import member_achievements, member_societies
-class ObtainThaliaAuthToken(ObtainAuthToken):
- """Custom override of the AuthToken view to force lowercase the username."""
-
- def post(self, request, *args, **kwargs) -> HttpResponse:
- serializer = self.serializer_class(
- data={
- "username": request.data.get("username").lower()
- if "username" in request.data
- else None,
- "password": request.data.get("password"),
- },
- context={"request": request},
- )
-
- if not serializer.is_valid():
- return Response({"error": "Unauthorized"}, status=401)
-
- user = serializer.validated_data["user"]
- token, _ = Token.objects.get_or_create(user=user)
- return Response({"token": token.key})
-
-
@method_decorator(login_required, "dispatch")
@method_decorator(membership_required, "dispatch")
class MembersIndex(PagedView):
diff --git a/website/thaliawebsite/api/v1/urls.py b/website/thaliawebsite/api/v1/urls.py
--- a/website/thaliawebsite/api/v1/urls.py
+++ b/website/thaliawebsite/api/v1/urls.py
@@ -3,13 +3,11 @@
from rest_framework.schemas import get_schema_view
-from members.views import ObtainThaliaAuthToken
from thaliawebsite.api.openapi import OAuthSchemaGenerator
app_name = "thaliawebsite"
urlpatterns = [
- path("token-auth/", ObtainThaliaAuthToken.as_view()),
path("", include("activemembers.api.v1.urls")),
path("", include("announcements.api.v1.urls")),
path("", include("events.api.v1.urls")),
| {"golden_diff": "diff --git a/website/members/views.py b/website/members/views.py\n--- a/website/members/views.py\n+++ b/website/members/views.py\n@@ -14,10 +14,6 @@\n from django.views.generic import CreateView, DetailView, UpdateView\n from django.views.generic.base import TemplateResponseMixin, TemplateView, View\n \n-from rest_framework.authtoken.models import Token\n-from rest_framework.authtoken.views import ObtainAuthToken\n-from rest_framework.response import Response\n-\n import activemembers.services as activemembers_services\n import events.services as event_services\n import pizzas.services\n@@ -33,28 +29,6 @@\n from .services import member_achievements, member_societies\n \n \n-class ObtainThaliaAuthToken(ObtainAuthToken):\n- \"\"\"Custom override of the AuthToken view to force lowercase the username.\"\"\"\n-\n- def post(self, request, *args, **kwargs) -> HttpResponse:\n- serializer = self.serializer_class(\n- data={\n- \"username\": request.data.get(\"username\").lower()\n- if \"username\" in request.data\n- else None,\n- \"password\": request.data.get(\"password\"),\n- },\n- context={\"request\": request},\n- )\n-\n- if not serializer.is_valid():\n- return Response({\"error\": \"Unauthorized\"}, status=401)\n-\n- user = serializer.validated_data[\"user\"]\n- token, _ = Token.objects.get_or_create(user=user)\n- return Response({\"token\": token.key})\n-\n-\n @method_decorator(login_required, \"dispatch\")\n @method_decorator(membership_required, \"dispatch\")\n class MembersIndex(PagedView):\ndiff --git a/website/thaliawebsite/api/v1/urls.py b/website/thaliawebsite/api/v1/urls.py\n--- a/website/thaliawebsite/api/v1/urls.py\n+++ b/website/thaliawebsite/api/v1/urls.py\n@@ -3,13 +3,11 @@\n \n from rest_framework.schemas import get_schema_view\n \n-from members.views import ObtainThaliaAuthToken\n from thaliawebsite.api.openapi import OAuthSchemaGenerator\n \n app_name = \"thaliawebsite\"\n \n urlpatterns = [\n- path(\"token-auth/\", ObtainThaliaAuthToken.as_view()),\n path(\"\", include(\"activemembers.api.v1.urls\")),\n path(\"\", include(\"announcements.api.v1.urls\")),\n path(\"\", include(\"events.api.v1.urls\")),\n", "issue": "Remove ObtainThaliaAuthToken view\n### Describe the change\r\nRemove https://github.com/svthalia/concrexit/blob/4b664220b406361245e153433288ffbcf9d6f4a9/website/members/views.py#L35\r\n\r\n\r\n### Motivation\r\nWe don't use it \r\n\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.urls import include, path\n\nfrom rest_framework.schemas import get_schema_view\n\nfrom members.views import ObtainThaliaAuthToken\nfrom thaliawebsite.api.openapi import OAuthSchemaGenerator\n\napp_name = \"thaliawebsite\"\n\nurlpatterns = [\n path(\"token-auth/\", ObtainThaliaAuthToken.as_view()),\n path(\"\", include(\"activemembers.api.v1.urls\")),\n path(\"\", include(\"announcements.api.v1.urls\")),\n path(\"\", include(\"events.api.v1.urls\")),\n path(\"\", include(\"members.api.v1.urls\")),\n path(\"\", include(\"partners.api.v1.urls\")),\n path(\"\", include(\"pizzas.api.v1.urls\")),\n path(\"\", include(\"photos.api.v1.urls\")),\n path(\"\", include(\"pushnotifications.api.v1.urls\")),\n path(\"\", include(\"payments.api.v1.urls\")),\n path(\n \"schema\",\n get_schema_view(\n title=\"API v1\",\n version=settings.SOURCE_COMMIT,\n url=\"/api/v1/\",\n urlconf=\"thaliawebsite.api.v1.urls\",\n generator_class=OAuthSchemaGenerator,\n ),\n name=\"schema\",\n ),\n]\n", "path": "website/thaliawebsite/api/v1/urls.py"}, {"content": 
"\"\"\"Views provided by the members package.\"\"\"\nimport json\nfrom datetime import date, datetime\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.db.models import Q, QuerySet\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse_lazy\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import CreateView, DetailView, UpdateView\nfrom django.views.generic.base import TemplateResponseMixin, TemplateView, View\n\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.authtoken.views import ObtainAuthToken\nfrom rest_framework.response import Response\n\nimport activemembers.services as activemembers_services\nimport events.services as event_services\nimport pizzas.services\nfrom members import emails, services\nfrom members.decorators import membership_required\nfrom members.models import EmailChange, Member, Membership, Profile\nfrom thaliawebsite.views import PagedView\nfrom utils.media.services import fetch_thumbnails_db\nfrom utils.snippets import datetime_to_lectureyear\n\nfrom . import models\nfrom .forms import ProfileForm\nfrom .services import member_achievements, member_societies\n\n\nclass ObtainThaliaAuthToken(ObtainAuthToken):\n \"\"\"Custom override of the AuthToken view to force lowercase the username.\"\"\"\n\n def post(self, request, *args, **kwargs) -> HttpResponse:\n serializer = self.serializer_class(\n data={\n \"username\": request.data.get(\"username\").lower()\n if \"username\" in request.data\n else None,\n \"password\": request.data.get(\"password\"),\n },\n context={\"request\": request},\n )\n\n if not serializer.is_valid():\n return Response({\"error\": \"Unauthorized\"}, status=401)\n\n user = serializer.validated_data[\"user\"]\n token, _ = Token.objects.get_or_create(user=user)\n return Response({\"token\": token.key})\n\n\n@method_decorator(login_required, \"dispatch\")\n@method_decorator(membership_required, \"dispatch\")\nclass MembersIndex(PagedView):\n \"\"\"View that renders the members overview.\"\"\"\n\n model = Member\n paginate_by = 28\n template_name = \"members/index.html\"\n context_object_name = \"members\"\n keywords = None\n query_filter = \"\"\n year_range = []\n\n def setup(self, request, *args, **kwargs) -> None:\n super().setup(request, *args, **kwargs)\n current_lectureyear = datetime_to_lectureyear(date.today())\n self.year_range = list(\n reversed(range(current_lectureyear - 5, current_lectureyear + 1))\n )\n self.keywords = request.GET.get(\"keywords\", \"\").split() or None\n self.query_filter = kwargs.get(\"filter\", None)\n\n def get_queryset(self) -> QuerySet:\n memberships_query = Q(until__gt=datetime.now()) | Q(until=None)\n members_query = ~Q(id=None)\n\n if self.query_filter and self.query_filter.isdigit():\n members_query &= Q(profile__starting_year=int(self.query_filter))\n memberships_query &= Q(type=Membership.MEMBER)\n elif self.query_filter == \"older\":\n members_query &= Q(profile__starting_year__lt=self.year_range[-1])\n memberships_query &= Q(type=Membership.MEMBER)\n elif self.query_filter == \"former\":\n # Filter out all current active memberships\n memberships_query &= Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)\n memberships = Membership.objects.filter(memberships_query)\n members_query 
&= ~Q(pk__in=memberships.values(\"user__pk\"))\n # Members_query contains users that are not currently (honorary)member\n elif self.query_filter == \"benefactors\":\n memberships_query &= Q(type=Membership.BENEFACTOR)\n elif self.query_filter == \"honorary\":\n memberships_query = Q(until__gt=datetime.now().date()) | Q(until=None)\n memberships_query &= Q(type=Membership.HONORARY)\n\n if self.keywords:\n for key in self.keywords:\n # Works because relevant options all have `nick` in their key\n members_query &= (\n (\n Q(profile__nickname__icontains=key)\n & Q(profile__display_name_preference__contains=\"nick\")\n )\n | Q(first_name__icontains=key)\n | Q(last_name__icontains=key)\n | Q(username__icontains=key)\n )\n\n if self.query_filter == \"former\":\n memberships_query = Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)\n memberships = Membership.objects.filter(memberships_query)\n all_memberships = Membership.objects.all()\n # Only keep members that were once members, or are legacy users\n # that do not have any memberships at all\n members_query &= Q(pk__in=memberships.values(\"user__pk\")) | ~Q(\n pk__in=all_memberships.values(\"user__pk\")\n )\n else:\n memberships = Membership.objects.filter(memberships_query)\n members_query &= Q(pk__in=memberships.values(\"user__pk\"))\n members = (\n Member.objects.filter(members_query)\n .order_by(\"first_name\")\n .select_related(\"profile\")\n )\n return members\n\n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n\n context.update(\n {\n \"filter\": self.query_filter,\n \"year_range\": self.year_range,\n \"keywords\": self.keywords,\n }\n )\n\n fetch_thumbnails_db(\n [x.profile.photo for x in context[\"object_list\"] if x.profile.photo]\n )\n\n return context\n\n\n@method_decorator(login_required, \"dispatch\")\nclass ProfileDetailView(DetailView):\n \"\"\"View that renders a member's profile.\"\"\"\n\n context_object_name = \"member\"\n model = Member\n template_name = \"members/user/profile.html\"\n\n def setup(self, request, *args, **kwargs) -> None:\n if \"pk\" not in kwargs and request.member:\n kwargs[\"pk\"] = request.member.pk\n super().setup(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n member = context[\"member\"]\n\n achievements = member_achievements(member)\n societies = member_societies(member)\n\n membership = member.current_membership\n membership_type = _(\"Unknown membership history\")\n if membership:\n membership_type = membership.get_type_display()\n elif member.has_been_honorary_member():\n membership_type = _(\"Former honorary member\")\n elif member.has_been_member():\n membership_type = _(\"Former member\")\n elif member.latest_membership:\n membership_type = _(\"Former benefactor\")\n\n context.update(\n {\n \"achievements\": achievements,\n \"societies\": societies,\n \"membership_type\": membership_type,\n }\n )\n\n return context\n\n\n@method_decorator(login_required, \"dispatch\")\nclass UserProfileUpdateView(SuccessMessageMixin, UpdateView):\n \"\"\"View that allows a user to update their profile.\"\"\"\n\n template_name = \"members/user/edit_profile.html\"\n model = Profile\n form_class = ProfileForm\n success_url = reverse_lazy(\"members:edit-profile\")\n success_message = _(\"Your profile has been updated successfully.\")\n\n def get_object(self, queryset=None) -> Profile:\n return get_object_or_404(models.Profile, user=self.request.user)\n\n\n@method_decorator(login_required, 
\"dispatch\")\nclass StatisticsView(TemplateView):\n \"\"\"View that renders the statistics page.\"\"\"\n\n template_name = \"members/statistics.html\"\n\n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n context.update(\n {\n \"total_members\": models.Member.current_members.count(),\n \"cohort_sizes\": json.dumps(services.gen_stats_year()),\n \"member_type_distribution\": json.dumps(\n services.gen_stats_member_type()\n ),\n \"committee_sizes\": json.dumps(\n activemembers_services.generate_statistics()\n ),\n \"event_categories\": json.dumps(\n event_services.generate_category_statistics()\n ),\n \"total_pizza_orders\": json.dumps(\n pizzas.services.gen_stats_pizza_orders()\n ),\n \"active_members\": json.dumps(services.gen_stats_active_members()),\n }\n )\n\n return context\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass EmailChangeFormView(CreateView):\n \"\"\"View that renders the email change form.\"\"\"\n\n model = EmailChange\n fields = [\"email\", \"member\"]\n template_name = \"members/user/email_change.html\"\n\n def get_initial(self) -> dict:\n initial = super().get_initial()\n initial[\"email\"] = self.request.member.email\n return initial\n\n def post(self, request, *args, **kwargs) -> HttpResponse:\n request.POST = request.POST.dict()\n request.POST[\"member\"] = request.member.pk\n return super().post(request, *args, **kwargs)\n\n def form_valid(self, form) -> HttpResponse:\n change_request = form.save()\n emails.send_email_change_confirmation_messages(change_request)\n return TemplateResponse(\n request=self.request, template=\"members/user/email_change_requested.html\"\n )\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass EmailChangeConfirmView(View, TemplateResponseMixin):\n \"\"\"View that renders an HTML template and confirms the old email address.\"\"\"\n\n template_name = \"members/user/email_change_confirmed.html\"\n\n def get(self, request, *args, **kwargs) -> HttpResponse:\n if not EmailChange.objects.filter(confirm_key=kwargs[\"key\"]).exists():\n raise Http404\n\n change_request = EmailChange.objects.get(confirm_key=kwargs[\"key\"])\n\n services.confirm_email_change(change_request)\n\n return self.render_to_response({})\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass EmailChangeVerifyView(View, TemplateResponseMixin):\n \"\"\"View that renders an HTML template and verifies the new email address.\"\"\"\n\n template_name = \"members/user/email_change_verified.html\"\n\n def get(self, request, *args, **kwargs) -> HttpResponse:\n if not EmailChange.objects.filter(verify_key=kwargs[\"key\"]).exists():\n raise Http404\n\n change_request = EmailChange.objects.get(verify_key=kwargs[\"key\"])\n\n services.verify_email_change(change_request)\n\n return self.render_to_response({})\n", "path": "website/members/views.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.urls import include, path\n\nfrom rest_framework.schemas import get_schema_view\n\nfrom thaliawebsite.api.openapi import OAuthSchemaGenerator\n\napp_name = \"thaliawebsite\"\n\nurlpatterns = [\n path(\"\", include(\"activemembers.api.v1.urls\")),\n path(\"\", include(\"announcements.api.v1.urls\")),\n path(\"\", include(\"events.api.v1.urls\")),\n path(\"\", include(\"members.api.v1.urls\")),\n path(\"\", include(\"partners.api.v1.urls\")),\n path(\"\", include(\"pizzas.api.v1.urls\")),\n path(\"\", include(\"photos.api.v1.urls\")),\n path(\"\", 
include(\"pushnotifications.api.v1.urls\")),\n path(\"\", include(\"payments.api.v1.urls\")),\n path(\n \"schema\",\n get_schema_view(\n title=\"API v1\",\n version=settings.SOURCE_COMMIT,\n url=\"/api/v1/\",\n urlconf=\"thaliawebsite.api.v1.urls\",\n generator_class=OAuthSchemaGenerator,\n ),\n name=\"schema\",\n ),\n]\n", "path": "website/thaliawebsite/api/v1/urls.py"}, {"content": "\"\"\"Views provided by the members package.\"\"\"\nimport json\nfrom datetime import date, datetime\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.db.models import Q, QuerySet\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse_lazy\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import CreateView, DetailView, UpdateView\nfrom django.views.generic.base import TemplateResponseMixin, TemplateView, View\n\nimport activemembers.services as activemembers_services\nimport events.services as event_services\nimport pizzas.services\nfrom members import emails, services\nfrom members.decorators import membership_required\nfrom members.models import EmailChange, Member, Membership, Profile\nfrom thaliawebsite.views import PagedView\nfrom utils.media.services import fetch_thumbnails_db\nfrom utils.snippets import datetime_to_lectureyear\n\nfrom . import models\nfrom .forms import ProfileForm\nfrom .services import member_achievements, member_societies\n\n\n@method_decorator(login_required, \"dispatch\")\n@method_decorator(membership_required, \"dispatch\")\nclass MembersIndex(PagedView):\n \"\"\"View that renders the members overview.\"\"\"\n\n model = Member\n paginate_by = 28\n template_name = \"members/index.html\"\n context_object_name = \"members\"\n keywords = None\n query_filter = \"\"\n year_range = []\n\n def setup(self, request, *args, **kwargs) -> None:\n super().setup(request, *args, **kwargs)\n current_lectureyear = datetime_to_lectureyear(date.today())\n self.year_range = list(\n reversed(range(current_lectureyear - 5, current_lectureyear + 1))\n )\n self.keywords = request.GET.get(\"keywords\", \"\").split() or None\n self.query_filter = kwargs.get(\"filter\", None)\n\n def get_queryset(self) -> QuerySet:\n memberships_query = Q(until__gt=datetime.now()) | Q(until=None)\n members_query = ~Q(id=None)\n\n if self.query_filter and self.query_filter.isdigit():\n members_query &= Q(profile__starting_year=int(self.query_filter))\n memberships_query &= Q(type=Membership.MEMBER)\n elif self.query_filter == \"older\":\n members_query &= Q(profile__starting_year__lt=self.year_range[-1])\n memberships_query &= Q(type=Membership.MEMBER)\n elif self.query_filter == \"former\":\n # Filter out all current active memberships\n memberships_query &= Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)\n memberships = Membership.objects.filter(memberships_query)\n members_query &= ~Q(pk__in=memberships.values(\"user__pk\"))\n # Members_query contains users that are not currently (honorary)member\n elif self.query_filter == \"benefactors\":\n memberships_query &= Q(type=Membership.BENEFACTOR)\n elif self.query_filter == \"honorary\":\n memberships_query = Q(until__gt=datetime.now().date()) | Q(until=None)\n memberships_query &= Q(type=Membership.HONORARY)\n\n if self.keywords:\n for key in self.keywords:\n # Works 
because relevant options all have `nick` in their key\n members_query &= (\n (\n Q(profile__nickname__icontains=key)\n & Q(profile__display_name_preference__contains=\"nick\")\n )\n | Q(first_name__icontains=key)\n | Q(last_name__icontains=key)\n | Q(username__icontains=key)\n )\n\n if self.query_filter == \"former\":\n memberships_query = Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)\n memberships = Membership.objects.filter(memberships_query)\n all_memberships = Membership.objects.all()\n # Only keep members that were once members, or are legacy users\n # that do not have any memberships at all\n members_query &= Q(pk__in=memberships.values(\"user__pk\")) | ~Q(\n pk__in=all_memberships.values(\"user__pk\")\n )\n else:\n memberships = Membership.objects.filter(memberships_query)\n members_query &= Q(pk__in=memberships.values(\"user__pk\"))\n members = (\n Member.objects.filter(members_query)\n .order_by(\"first_name\")\n .select_related(\"profile\")\n )\n return members\n\n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n\n context.update(\n {\n \"filter\": self.query_filter,\n \"year_range\": self.year_range,\n \"keywords\": self.keywords,\n }\n )\n\n fetch_thumbnails_db(\n [x.profile.photo for x in context[\"object_list\"] if x.profile.photo]\n )\n\n return context\n\n\n@method_decorator(login_required, \"dispatch\")\nclass ProfileDetailView(DetailView):\n \"\"\"View that renders a member's profile.\"\"\"\n\n context_object_name = \"member\"\n model = Member\n template_name = \"members/user/profile.html\"\n\n def setup(self, request, *args, **kwargs) -> None:\n if \"pk\" not in kwargs and request.member:\n kwargs[\"pk\"] = request.member.pk\n super().setup(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n member = context[\"member\"]\n\n achievements = member_achievements(member)\n societies = member_societies(member)\n\n membership = member.current_membership\n membership_type = _(\"Unknown membership history\")\n if membership:\n membership_type = membership.get_type_display()\n elif member.has_been_honorary_member():\n membership_type = _(\"Former honorary member\")\n elif member.has_been_member():\n membership_type = _(\"Former member\")\n elif member.latest_membership:\n membership_type = _(\"Former benefactor\")\n\n context.update(\n {\n \"achievements\": achievements,\n \"societies\": societies,\n \"membership_type\": membership_type,\n }\n )\n\n return context\n\n\n@method_decorator(login_required, \"dispatch\")\nclass UserProfileUpdateView(SuccessMessageMixin, UpdateView):\n \"\"\"View that allows a user to update their profile.\"\"\"\n\n template_name = \"members/user/edit_profile.html\"\n model = Profile\n form_class = ProfileForm\n success_url = reverse_lazy(\"members:edit-profile\")\n success_message = _(\"Your profile has been updated successfully.\")\n\n def get_object(self, queryset=None) -> Profile:\n return get_object_or_404(models.Profile, user=self.request.user)\n\n\n@method_decorator(login_required, \"dispatch\")\nclass StatisticsView(TemplateView):\n \"\"\"View that renders the statistics page.\"\"\"\n\n template_name = \"members/statistics.html\"\n\n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n context.update(\n {\n \"total_members\": models.Member.current_members.count(),\n \"cohort_sizes\": json.dumps(services.gen_stats_year()),\n \"member_type_distribution\": json.dumps(\n 
services.gen_stats_member_type()\n ),\n \"committee_sizes\": json.dumps(\n activemembers_services.generate_statistics()\n ),\n \"event_categories\": json.dumps(\n event_services.generate_category_statistics()\n ),\n \"total_pizza_orders\": json.dumps(\n pizzas.services.gen_stats_pizza_orders()\n ),\n \"active_members\": json.dumps(services.gen_stats_active_members()),\n }\n )\n\n return context\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass EmailChangeFormView(CreateView):\n \"\"\"View that renders the email change form.\"\"\"\n\n model = EmailChange\n fields = [\"email\", \"member\"]\n template_name = \"members/user/email_change.html\"\n\n def get_initial(self) -> dict:\n initial = super().get_initial()\n initial[\"email\"] = self.request.member.email\n return initial\n\n def post(self, request, *args, **kwargs) -> HttpResponse:\n request.POST = request.POST.dict()\n request.POST[\"member\"] = request.member.pk\n return super().post(request, *args, **kwargs)\n\n def form_valid(self, form) -> HttpResponse:\n change_request = form.save()\n emails.send_email_change_confirmation_messages(change_request)\n return TemplateResponse(\n request=self.request, template=\"members/user/email_change_requested.html\"\n )\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass EmailChangeConfirmView(View, TemplateResponseMixin):\n \"\"\"View that renders an HTML template and confirms the old email address.\"\"\"\n\n template_name = \"members/user/email_change_confirmed.html\"\n\n def get(self, request, *args, **kwargs) -> HttpResponse:\n if not EmailChange.objects.filter(confirm_key=kwargs[\"key\"]).exists():\n raise Http404\n\n change_request = EmailChange.objects.get(confirm_key=kwargs[\"key\"])\n\n services.confirm_email_change(change_request)\n\n return self.render_to_response({})\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass EmailChangeVerifyView(View, TemplateResponseMixin):\n \"\"\"View that renders an HTML template and verifies the new email address.\"\"\"\n\n template_name = \"members/user/email_change_verified.html\"\n\n def get(self, request, *args, **kwargs) -> HttpResponse:\n if not EmailChange.objects.filter(verify_key=kwargs[\"key\"]).exists():\n raise Http404\n\n change_request = EmailChange.objects.get(verify_key=kwargs[\"key\"])\n\n services.verify_email_change(change_request)\n\n return self.render_to_response({})\n", "path": "website/members/views.py"}]} | 3,697 | 523 |
gh_patches_debug_38287 | rasdani/github-patches | git_diff | microsoft__MLOS-358 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
flake8 and/or prettier setup for devcontainer
Per comments in #354 and #340
We should "just" add flake8 and some kind of auto-formatter ("prettier") to the devcontainer setup so that these kinds of issues are caught during the dev cycle, as much as possible, rather than surfacing as review nits.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlos_core/setup.py`
Content:
```
1 #
2 # Copyright (c) Microsoft Corporation.
3 # Licensed under the MIT License.
4 #
5 """
6 Setup instructions for the mlos_core package.
7 """
8
9 from itertools import chain
10 from logging import warning
11 from typing import Dict, List
12
13 from setuptools import setup, find_packages
14
15 from _version import _VERSION # pylint: disable=import-private-name
16
17 try:
18 from setuptools_scm import get_version
19 version = get_version(root='..', relative_to=__file__)
20 if version is not None:
21 _VERSION = version
22 except ImportError:
23 warning("setuptools_scm not found, using version from _version.py")
24 except LookupError as e:
25 warning(f"setuptools_scm failed to find git version, using version from _version.py: {e}")
26
27
28 extra_requires: Dict[str, List[str]] = {
29 'emukit': ['emukit'],
30 'skopt': ['scikit-optimize<=0.9.0'], # FIXME: temporarily work around some version mismatch issues (PR 850)
31 }
32
33 # construct special 'full' extra that adds requirements for all built-in
34 # backend integrations and additional extra features.
35 extra_requires['full'] = list(set(chain(*extra_requires.values())))
36
37 extra_requires['full-tests'] = extra_requires['full'] + [
38 'pytest',
39 'pytest-forked',
40 'pytest-xdist',
41 'pytest-cov',
42 'pytest-local-badge',
43 ]
44
45 # pylint: disable=duplicate-code
46 MODULE_BASE_NAME = 'mlos_core'
47 setup(
48 name='mlos-core',
49 version=_VERSION,
50 packages=find_packages(exclude=[f"{MODULE_BASE_NAME}.tests", f"{MODULE_BASE_NAME}.tests.*"]),
51 package_data={
52 '': ['py.typed', '**/*.pyi'],
53 },
54 install_requires=[
55 'scikit-learn<1.2', # FIXME: temporarily work around some version mismatch issues (PR 850)
56 'joblib>=1.1.1', # CVE-2022-21797: scikit-learn dependency, addressed in 1.2.0dev0, which isn't currently released
57 'scipy>=1.3.2',
58 'numpy<1.24', # FIXME: temporarily work around some version mismatch issues (PR 850)
59 'pandas>=1.0.3',
60 'ConfigSpace>=0.6.1',
61 ],
62 extras_require=extra_requires,
63 author='Microsoft',
64 author_email='[email protected]',
65 description=('MLOS Core Python interface for parameter optimization.'),
66 license='MIT',
67 keywords='',
68 url='https://aka.ms/mlos-core',
69 python_requires='>=3.8',
70 )
71
```
Path: `mlos_bench/setup.py`
Content:
```
1 #
2 # Copyright (c) Microsoft Corporation.
3 # Licensed under the MIT License.
4 #
5 """
6 Setup instructions for the mlos_bench package.
7 """
8
9 from logging import warning
10 from itertools import chain
11 from typing import Dict, List
12
13 from setuptools import setup, find_packages
14
15 from _version import _VERSION # pylint: disable=import-private-name
16
17 try:
18 from setuptools_scm import get_version
19 version = get_version(root='..', relative_to=__file__)
20 if version is not None:
21 _VERSION = version
22 except ImportError:
23 warning("setuptools_scm not found, using version from _version.py")
24 except LookupError as e:
25 warning(f"setuptools_scm failed to find git version, using version from _version.py: {e}")
26
27
28 extra_requires: Dict[str, List[str]] = {
29 # Additional tools for extra functionality.
30 'azure': ['azure-storage-file-share'],
31 'storage-sql-duckdb': ['sqlalchemy', 'duckdb_engine'],
32 'storage-sql-mysql': ['sqlalchemy', 'mysql-connector-python'],
33 'storage-sql-postgres': ['sqlalchemy', 'psycopg2'],
34 'storage-sql-sqlite': ['sqlalchemy'], # sqlite3 comes with python, so we don't need to install it.
35 # Transitive extra_requires from mlos-core.
36 'emukit': ['emukit'],
37 'skopt': ['scikit-optimize'],
38 }
39
40 # construct special 'full' extra that adds requirements for all built-in
41 # backend integrations and additional extra features.
42 extra_requires['full'] = list(set(chain(*extra_requires.values())))
43
44 extra_requires['full-tests'] = extra_requires['full'] + [
45 'pytest',
46 'pytest-forked',
47 'pytest-xdist',
48 'pytest-cov',
49 'pytest-local-badge',
50 ]
51
52 # pylint: disable=duplicate-code
53 MODULE_BASE_NAME = 'mlos_bench'
54 setup(
55 name='mlos-bench',
56 version=_VERSION,
57 packages=find_packages(exclude=[f"{MODULE_BASE_NAME}.tests", f"{MODULE_BASE_NAME}.tests.*"]),
58 package_data={
59 '': ['py.typed', '**/*.pyi'],
60 'mlos_bench': [
61 'config/**/*.md',
62 'config/**/*.jsonc',
63 'config/**/*.json',
64 'config/**/*.py',
65 'config/**/*.sh',
66 'config/**/*.cmd',
67 'config/**/*.ps1',
68 ],
69 },
70 entry_points={
71 'console_scripts': [
72 'mlos_bench = mlos_bench.run:_main',
73 ],
74 },
75 install_requires=[
76 'mlos-core==' + _VERSION,
77 'requests',
78 'json5',
79 'jsonschema',
80 'importlib_resources;python_version<"3.10"',
81 ] + extra_requires['storage-sql-sqlite'], # NOTE: For now sqlite is a fallback storage backend, so we always install it.
82 extras_require=extra_requires,
83 author='Microsoft',
84 author_email='[email protected]',
85 description=('MLOS Bench Python interface for benchmark automation and optimization.'),
86 license='MIT',
87 keywords='',
88 url='https://aka.ms/mlos-core',
89 python_requires='>=3.8',
90 )
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlos_bench/setup.py b/mlos_bench/setup.py
--- a/mlos_bench/setup.py
+++ b/mlos_bench/setup.py
@@ -18,14 +18,14 @@
from setuptools_scm import get_version
version = get_version(root='..', relative_to=__file__)
if version is not None:
- _VERSION = version
+ _VERSION = version # noqa: F811
except ImportError:
warning("setuptools_scm not found, using version from _version.py")
except LookupError as e:
warning(f"setuptools_scm failed to find git version, using version from _version.py: {e}")
-extra_requires: Dict[str, List[str]] = {
+extra_requires: Dict[str, List[str]] = { # pylint: disable=consider-using-namedtuple-or-dataclass
# Additional tools for extra functionality.
'azure': ['azure-storage-file-share'],
'storage-sql-duckdb': ['sqlalchemy', 'duckdb_engine'],
diff --git a/mlos_core/setup.py b/mlos_core/setup.py
--- a/mlos_core/setup.py
+++ b/mlos_core/setup.py
@@ -18,14 +18,14 @@
from setuptools_scm import get_version
version = get_version(root='..', relative_to=__file__)
if version is not None:
- _VERSION = version
+ _VERSION = version # noqa: F811
except ImportError:
warning("setuptools_scm not found, using version from _version.py")
except LookupError as e:
warning(f"setuptools_scm failed to find git version, using version from _version.py: {e}")
-extra_requires: Dict[str, List[str]] = {
+extra_requires: Dict[str, List[str]] = { # pylint: disable=consider-using-namedtuple-or-dataclass
'emukit': ['emukit'],
'skopt': ['scikit-optimize<=0.9.0'], # FIXME: temporarily work around some version mismatch issues (PR 850)
}
@@ -52,10 +52,10 @@
'': ['py.typed', '**/*.pyi'],
},
install_requires=[
- 'scikit-learn<1.2', # FIXME: temporarily work around some version mismatch issues (PR 850)
- 'joblib>=1.1.1', # CVE-2022-21797: scikit-learn dependency, addressed in 1.2.0dev0, which isn't currently released
+ 'scikit-learn<1.2', # FIXME: temporarily work around some version mismatch issues (PR 850)
+ 'joblib>=1.1.1', # CVE-2022-21797: scikit-learn dependency, addressed in 1.2.0dev0, which isn't currently released
'scipy>=1.3.2',
- 'numpy<1.24', # FIXME: temporarily work around some version mismatch issues (PR 850)
+ 'numpy<1.24', # FIXME: temporarily work around some version mismatch issues (PR 850)
'pandas>=1.0.3',
'ConfigSpace>=0.6.1',
],
| {"golden_diff": "diff --git a/mlos_bench/setup.py b/mlos_bench/setup.py\n--- a/mlos_bench/setup.py\n+++ b/mlos_bench/setup.py\n@@ -18,14 +18,14 @@\n from setuptools_scm import get_version\n version = get_version(root='..', relative_to=__file__)\n if version is not None:\n- _VERSION = version\n+ _VERSION = version # noqa: F811\n except ImportError:\n warning(\"setuptools_scm not found, using version from _version.py\")\n except LookupError as e:\n warning(f\"setuptools_scm failed to find git version, using version from _version.py: {e}\")\n \n \n-extra_requires: Dict[str, List[str]] = {\n+extra_requires: Dict[str, List[str]] = { # pylint: disable=consider-using-namedtuple-or-dataclass\n # Additional tools for extra functionality.\n 'azure': ['azure-storage-file-share'],\n 'storage-sql-duckdb': ['sqlalchemy', 'duckdb_engine'],\ndiff --git a/mlos_core/setup.py b/mlos_core/setup.py\n--- a/mlos_core/setup.py\n+++ b/mlos_core/setup.py\n@@ -18,14 +18,14 @@\n from setuptools_scm import get_version\n version = get_version(root='..', relative_to=__file__)\n if version is not None:\n- _VERSION = version\n+ _VERSION = version # noqa: F811\n except ImportError:\n warning(\"setuptools_scm not found, using version from _version.py\")\n except LookupError as e:\n warning(f\"setuptools_scm failed to find git version, using version from _version.py: {e}\")\n \n \n-extra_requires: Dict[str, List[str]] = {\n+extra_requires: Dict[str, List[str]] = { # pylint: disable=consider-using-namedtuple-or-dataclass\n 'emukit': ['emukit'],\n 'skopt': ['scikit-optimize<=0.9.0'], # FIXME: temporarily work around some version mismatch issues (PR 850)\n }\n@@ -52,10 +52,10 @@\n '': ['py.typed', '**/*.pyi'],\n },\n install_requires=[\n- 'scikit-learn<1.2', # FIXME: temporarily work around some version mismatch issues (PR 850)\n- 'joblib>=1.1.1', # CVE-2022-21797: scikit-learn dependency, addressed in 1.2.0dev0, which isn't currently released\n+ 'scikit-learn<1.2', # FIXME: temporarily work around some version mismatch issues (PR 850)\n+ 'joblib>=1.1.1', # CVE-2022-21797: scikit-learn dependency, addressed in 1.2.0dev0, which isn't currently released\n 'scipy>=1.3.2',\n- 'numpy<1.24', # FIXME: temporarily work around some version mismatch issues (PR 850)\n+ 'numpy<1.24', # FIXME: temporarily work around some version mismatch issues (PR 850)\n 'pandas>=1.0.3',\n 'ConfigSpace>=0.6.1',\n ],\n", "issue": "flake8 and/or prettier setup for devcontainer\nPer comments in #354 and #340 \r\nWe should \"just\" add flake8 and some type of auto prettier to the devcontainer setup so that those types of things are caught during dev cycle instead of review nits as much as possible.\n", "before_files": [{"content": "#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n#\n\"\"\"\nSetup instructions for the mlos_core package.\n\"\"\"\n\nfrom itertools import chain\nfrom logging import warning\nfrom typing import Dict, List\n\nfrom setuptools import setup, find_packages\n\nfrom _version import _VERSION # pylint: disable=import-private-name\n\ntry:\n from setuptools_scm import get_version\n version = get_version(root='..', relative_to=__file__)\n if version is not None:\n _VERSION = version\nexcept ImportError:\n warning(\"setuptools_scm not found, using version from _version.py\")\nexcept LookupError as e:\n warning(f\"setuptools_scm failed to find git version, using version from _version.py: {e}\")\n\n\nextra_requires: Dict[str, List[str]] = {\n 'emukit': ['emukit'],\n 'skopt': ['scikit-optimize<=0.9.0'], # FIXME: 
temporarily work around some version mismatch issues (PR 850)\n}\n\n# construct special 'full' extra that adds requirements for all built-in\n# backend integrations and additional extra features.\nextra_requires['full'] = list(set(chain(*extra_requires.values())))\n\nextra_requires['full-tests'] = extra_requires['full'] + [\n 'pytest',\n 'pytest-forked',\n 'pytest-xdist',\n 'pytest-cov',\n 'pytest-local-badge',\n]\n\n# pylint: disable=duplicate-code\nMODULE_BASE_NAME = 'mlos_core'\nsetup(\n name='mlos-core',\n version=_VERSION,\n packages=find_packages(exclude=[f\"{MODULE_BASE_NAME}.tests\", f\"{MODULE_BASE_NAME}.tests.*\"]),\n package_data={\n '': ['py.typed', '**/*.pyi'],\n },\n install_requires=[\n 'scikit-learn<1.2', # FIXME: temporarily work around some version mismatch issues (PR 850)\n 'joblib>=1.1.1', # CVE-2022-21797: scikit-learn dependency, addressed in 1.2.0dev0, which isn't currently released\n 'scipy>=1.3.2',\n 'numpy<1.24', # FIXME: temporarily work around some version mismatch issues (PR 850)\n 'pandas>=1.0.3',\n 'ConfigSpace>=0.6.1',\n ],\n extras_require=extra_requires,\n author='Microsoft',\n author_email='[email protected]',\n description=('MLOS Core Python interface for parameter optimization.'),\n license='MIT',\n keywords='',\n url='https://aka.ms/mlos-core',\n python_requires='>=3.8',\n)\n", "path": "mlos_core/setup.py"}, {"content": "#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n#\n\"\"\"\nSetup instructions for the mlos_bench package.\n\"\"\"\n\nfrom logging import warning\nfrom itertools import chain\nfrom typing import Dict, List\n\nfrom setuptools import setup, find_packages\n\nfrom _version import _VERSION # pylint: disable=import-private-name\n\ntry:\n from setuptools_scm import get_version\n version = get_version(root='..', relative_to=__file__)\n if version is not None:\n _VERSION = version\nexcept ImportError:\n warning(\"setuptools_scm not found, using version from _version.py\")\nexcept LookupError as e:\n warning(f\"setuptools_scm failed to find git version, using version from _version.py: {e}\")\n\n\nextra_requires: Dict[str, List[str]] = {\n # Additional tools for extra functionality.\n 'azure': ['azure-storage-file-share'],\n 'storage-sql-duckdb': ['sqlalchemy', 'duckdb_engine'],\n 'storage-sql-mysql': ['sqlalchemy', 'mysql-connector-python'],\n 'storage-sql-postgres': ['sqlalchemy', 'psycopg2'],\n 'storage-sql-sqlite': ['sqlalchemy'], # sqlite3 comes with python, so we don't need to install it.\n # Transitive extra_requires from mlos-core.\n 'emukit': ['emukit'],\n 'skopt': ['scikit-optimize'],\n}\n\n# construct special 'full' extra that adds requirements for all built-in\n# backend integrations and additional extra features.\nextra_requires['full'] = list(set(chain(*extra_requires.values())))\n\nextra_requires['full-tests'] = extra_requires['full'] + [\n 'pytest',\n 'pytest-forked',\n 'pytest-xdist',\n 'pytest-cov',\n 'pytest-local-badge',\n]\n\n# pylint: disable=duplicate-code\nMODULE_BASE_NAME = 'mlos_bench'\nsetup(\n name='mlos-bench',\n version=_VERSION,\n packages=find_packages(exclude=[f\"{MODULE_BASE_NAME}.tests\", f\"{MODULE_BASE_NAME}.tests.*\"]),\n package_data={\n '': ['py.typed', '**/*.pyi'],\n 'mlos_bench': [\n 'config/**/*.md',\n 'config/**/*.jsonc',\n 'config/**/*.json',\n 'config/**/*.py',\n 'config/**/*.sh',\n 'config/**/*.cmd',\n 'config/**/*.ps1',\n ],\n },\n entry_points={\n 'console_scripts': [\n 'mlos_bench = mlos_bench.run:_main',\n ],\n },\n install_requires=[\n 'mlos-core==' + _VERSION,\n 
'requests',\n 'json5',\n 'jsonschema',\n 'importlib_resources;python_version<\"3.10\"',\n ] + extra_requires['storage-sql-sqlite'], # NOTE: For now sqlite is a fallback storage backend, so we always install it.\n extras_require=extra_requires,\n author='Microsoft',\n author_email='[email protected]',\n description=('MLOS Bench Python interface for benchmark automation and optimization.'),\n license='MIT',\n keywords='',\n url='https://aka.ms/mlos-core',\n python_requires='>=3.8',\n)\n", "path": "mlos_bench/setup.py"}], "after_files": [{"content": "#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n#\n\"\"\"\nSetup instructions for the mlos_core package.\n\"\"\"\n\nfrom itertools import chain\nfrom logging import warning\nfrom typing import Dict, List\n\nfrom setuptools import setup, find_packages\n\nfrom _version import _VERSION # pylint: disable=import-private-name\n\ntry:\n from setuptools_scm import get_version\n version = get_version(root='..', relative_to=__file__)\n if version is not None:\n _VERSION = version # noqa: F811\nexcept ImportError:\n warning(\"setuptools_scm not found, using version from _version.py\")\nexcept LookupError as e:\n warning(f\"setuptools_scm failed to find git version, using version from _version.py: {e}\")\n\n\nextra_requires: Dict[str, List[str]] = { # pylint: disable=consider-using-namedtuple-or-dataclass\n 'emukit': ['emukit'],\n 'skopt': ['scikit-optimize<=0.9.0'], # FIXME: temporarily work around some version mismatch issues (PR 850)\n}\n\n# construct special 'full' extra that adds requirements for all built-in\n# backend integrations and additional extra features.\nextra_requires['full'] = list(set(chain(*extra_requires.values())))\n\nextra_requires['full-tests'] = extra_requires['full'] + [\n 'pytest',\n 'pytest-forked',\n 'pytest-xdist',\n 'pytest-cov',\n 'pytest-local-badge',\n]\n\n# pylint: disable=duplicate-code\nMODULE_BASE_NAME = 'mlos_core'\nsetup(\n name='mlos-core',\n version=_VERSION,\n packages=find_packages(exclude=[f\"{MODULE_BASE_NAME}.tests\", f\"{MODULE_BASE_NAME}.tests.*\"]),\n package_data={\n '': ['py.typed', '**/*.pyi'],\n },\n install_requires=[\n 'scikit-learn<1.2', # FIXME: temporarily work around some version mismatch issues (PR 850)\n 'joblib>=1.1.1', # CVE-2022-21797: scikit-learn dependency, addressed in 1.2.0dev0, which isn't currently released\n 'scipy>=1.3.2',\n 'numpy<1.24', # FIXME: temporarily work around some version mismatch issues (PR 850)\n 'pandas>=1.0.3',\n 'ConfigSpace>=0.6.1',\n ],\n extras_require=extra_requires,\n author='Microsoft',\n author_email='[email protected]',\n description=('MLOS Core Python interface for parameter optimization.'),\n license='MIT',\n keywords='',\n url='https://aka.ms/mlos-core',\n python_requires='>=3.8',\n)\n", "path": "mlos_core/setup.py"}, {"content": "#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n#\n\"\"\"\nSetup instructions for the mlos_bench package.\n\"\"\"\n\nfrom logging import warning\nfrom itertools import chain\nfrom typing import Dict, List\n\nfrom setuptools import setup, find_packages\n\nfrom _version import _VERSION # pylint: disable=import-private-name\n\ntry:\n from setuptools_scm import get_version\n version = get_version(root='..', relative_to=__file__)\n if version is not None:\n _VERSION = version # noqa: F811\nexcept ImportError:\n warning(\"setuptools_scm not found, using version from _version.py\")\nexcept LookupError as e:\n warning(f\"setuptools_scm failed to find git version, using version from 
_version.py: {e}\")\n\n\nextra_requires: Dict[str, List[str]] = { # pylint: disable=consider-using-namedtuple-or-dataclass\n # Additional tools for extra functionality.\n 'azure': ['azure-storage-file-share'],\n 'storage-sql-duckdb': ['sqlalchemy', 'duckdb_engine'],\n 'storage-sql-mysql': ['sqlalchemy', 'mysql-connector-python'],\n 'storage-sql-postgres': ['sqlalchemy', 'psycopg2'],\n 'storage-sql-sqlite': ['sqlalchemy'], # sqlite3 comes with python, so we don't need to install it.\n # Transitive extra_requires from mlos-core.\n 'emukit': ['emukit'],\n 'skopt': ['scikit-optimize'],\n}\n\n# construct special 'full' extra that adds requirements for all built-in\n# backend integrations and additional extra features.\nextra_requires['full'] = list(set(chain(*extra_requires.values())))\n\nextra_requires['full-tests'] = extra_requires['full'] + [\n 'pytest',\n 'pytest-forked',\n 'pytest-xdist',\n 'pytest-cov',\n 'pytest-local-badge',\n]\n\n# pylint: disable=duplicate-code\nMODULE_BASE_NAME = 'mlos_bench'\nsetup(\n name='mlos-bench',\n version=_VERSION,\n packages=find_packages(exclude=[f\"{MODULE_BASE_NAME}.tests\", f\"{MODULE_BASE_NAME}.tests.*\"]),\n package_data={\n '': ['py.typed', '**/*.pyi'],\n 'mlos_bench': [\n 'config/**/*.md',\n 'config/**/*.jsonc',\n 'config/**/*.json',\n 'config/**/*.py',\n 'config/**/*.sh',\n 'config/**/*.cmd',\n 'config/**/*.ps1',\n ],\n },\n entry_points={\n 'console_scripts': [\n 'mlos_bench = mlos_bench.run:_main',\n ],\n },\n install_requires=[\n 'mlos-core==' + _VERSION,\n 'requests',\n 'json5',\n 'jsonschema',\n 'importlib_resources;python_version<\"3.10\"',\n ] + extra_requires['storage-sql-sqlite'], # NOTE: For now sqlite is a fallback storage backend, so we always install it.\n extras_require=extra_requires,\n author='Microsoft',\n author_email='[email protected]',\n description=('MLOS Bench Python interface for benchmark automation and optimization.'),\n license='MIT',\n keywords='',\n url='https://aka.ms/mlos-core',\n python_requires='>=3.8',\n)\n", "path": "mlos_bench/setup.py"}]} | 1,935 | 744 |
gh_patches_debug_13487 | rasdani/github-patches | git_diff | pytorch__vision-2143 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ImageFolder non-descriptive error message
## 🐛 Bug
You get a weird Python error:
`TypeError: can only join an iterable`
This happens if you create an ImageFolder dataset on a directory with no samples while also passing an is_valid_file function: in that case the extensions variable is None (because is_valid_file is used instead), and joining it for the error message raises the TypeError.
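A minimal sketch (not part of the original report) of the failure mode — when `is_valid_file` is supplied, `DatasetFolder.__init__` receives `extensions=None`, and the empty-dataset error path tries to join it:

```python
# Illustration only: this mirrors what the error path effectively does.
extensions = None  # ImageFolder passes None for extensions when is_valid_file is given

try:
    ",".join(extensions)  # building the "Supported extensions are: ..." message
except TypeError as err:
    print(err)  # -> can only join an iterable
```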
## To Reproduce
Steps to reproduce the behavior:
1. Use ImageFolder from torchvision.datasets on a directory with no samples and pass an is_valid_file function.
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
```
is_type = lambda x: x.startswith(name)
print(dir_in)
data_set = datasets.ImageFolder(root=dir_in,
transform=transforms.Compose([transforms.CenterCrop(IMG_SIZE),
# transforms.Resize(IMAGE_SIZE),
transforms.ToTensor(),
transforms.Normalize(0.5, 0.5)]),
is_valid_file=is_type)
```
## Expected behavior
Should raise a RuntimeError saying that no samples were found.
## Environment
Please copy and paste the output from our
[environment collection script](https://raw.githubusercontent.com/pytorch/pytorch/master/torch/utils/collect_env.py)
(or fill out the checklist below manually).
PyTorch version: 1.5.0
Is debug build: No
CUDA used to build PyTorch: 10.2
OS: Manjaro Linux
GCC version: (Arch Linux 9.3.0-1) 9.3.0
CMake version: version 3.17.1
Python version: 3.8
Is CUDA available: Yes
CUDA runtime version: 10.0.130
GPU models and configuration: GPU 0: Quadro M1200
Nvidia driver version: 440.82
cuDNN version: /usr/lib/libcudnn.so.7.6.5
Versions of relevant libraries:
[pip3] numpy==1.18.3
[pip3] torch==1.5.0
[pip3] torch-utils==0.1.2
[pip3] torchvision==0.5.0
[conda] Could not collect
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchvision/datasets/folder.py`
Content:
```
1 from .vision import VisionDataset
2
3 from PIL import Image
4
5 import os
6 import os.path
7
8
9 def has_file_allowed_extension(filename, extensions):
10 """Checks if a file is an allowed extension.
11
12 Args:
13 filename (string): path to a file
14 extensions (tuple of strings): extensions to consider (lowercase)
15
16 Returns:
17 bool: True if the filename ends with one of given extensions
18 """
19 return filename.lower().endswith(extensions)
20
21
22 def is_image_file(filename):
23 """Checks if a file is an allowed image extension.
24
25 Args:
26 filename (string): path to a file
27
28 Returns:
29 bool: True if the filename ends with a known image extension
30 """
31 return has_file_allowed_extension(filename, IMG_EXTENSIONS)
32
33
34 def make_dataset(directory, class_to_idx, extensions=None, is_valid_file=None):
35 instances = []
36 directory = os.path.expanduser(directory)
37 both_none = extensions is None and is_valid_file is None
38 both_something = extensions is not None and is_valid_file is not None
39 if both_none or both_something:
40 raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
41 if extensions is not None:
42 def is_valid_file(x):
43 return has_file_allowed_extension(x, extensions)
44 for target_class in sorted(class_to_idx.keys()):
45 class_index = class_to_idx[target_class]
46 target_dir = os.path.join(directory, target_class)
47 if not os.path.isdir(target_dir):
48 continue
49 for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):
50 for fname in sorted(fnames):
51 path = os.path.join(root, fname)
52 if is_valid_file(path):
53 item = path, class_index
54 instances.append(item)
55 return instances
56
57
58 class DatasetFolder(VisionDataset):
59 """A generic data loader where the samples are arranged in this way: ::
60
61 root/class_x/xxx.ext
62 root/class_x/xxy.ext
63 root/class_x/xxz.ext
64
65 root/class_y/123.ext
66 root/class_y/nsdf3.ext
67 root/class_y/asd932_.ext
68
69 Args:
70 root (string): Root directory path.
71 loader (callable): A function to load a sample given its path.
72 extensions (tuple[string]): A list of allowed extensions.
73 both extensions and is_valid_file should not be passed.
74 transform (callable, optional): A function/transform that takes in
75 a sample and returns a transformed version.
76 E.g, ``transforms.RandomCrop`` for images.
77 target_transform (callable, optional): A function/transform that takes
78 in the target and transforms it.
79 is_valid_file (callable, optional): A function that takes path of a file
80 and check if the file is a valid file (used to check of corrupt files)
81 both extensions and is_valid_file should not be passed.
82
83 Attributes:
84 classes (list): List of the class names sorted alphabetically.
85 class_to_idx (dict): Dict with items (class_name, class_index).
86 samples (list): List of (sample path, class_index) tuples
87 targets (list): The class_index value for each image in the dataset
88 """
89
90 def __init__(self, root, loader, extensions=None, transform=None,
91 target_transform=None, is_valid_file=None):
92 super(DatasetFolder, self).__init__(root, transform=transform,
93 target_transform=target_transform)
94 classes, class_to_idx = self._find_classes(self.root)
95 samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file)
96 if len(samples) == 0:
97 raise (RuntimeError("Found 0 files in subfolders of: " + self.root + "\n"
98 "Supported extensions are: " + ",".join(extensions)))
99
100 self.loader = loader
101 self.extensions = extensions
102
103 self.classes = classes
104 self.class_to_idx = class_to_idx
105 self.samples = samples
106 self.targets = [s[1] for s in samples]
107
108 def _find_classes(self, dir):
109 """
110 Finds the class folders in a dataset.
111
112 Args:
113 dir (string): Root directory path.
114
115 Returns:
116 tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.
117
118 Ensures:
119 No class is a subdirectory of another.
120 """
121 classes = [d.name for d in os.scandir(dir) if d.is_dir()]
122 classes.sort()
123 class_to_idx = {classes[i]: i for i in range(len(classes))}
124 return classes, class_to_idx
125
126 def __getitem__(self, index):
127 """
128 Args:
129 index (int): Index
130
131 Returns:
132 tuple: (sample, target) where target is class_index of the target class.
133 """
134 path, target = self.samples[index]
135 sample = self.loader(path)
136 if self.transform is not None:
137 sample = self.transform(sample)
138 if self.target_transform is not None:
139 target = self.target_transform(target)
140
141 return sample, target
142
143 def __len__(self):
144 return len(self.samples)
145
146
147 IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')
148
149
150 def pil_loader(path):
151 # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
152 with open(path, 'rb') as f:
153 img = Image.open(f)
154 return img.convert('RGB')
155
156
157 def accimage_loader(path):
158 import accimage
159 try:
160 return accimage.Image(path)
161 except IOError:
162 # Potentially a decoding problem, fall back to PIL.Image
163 return pil_loader(path)
164
165
166 def default_loader(path):
167 from torchvision import get_image_backend
168 if get_image_backend() == 'accimage':
169 return accimage_loader(path)
170 else:
171 return pil_loader(path)
172
173
174 class ImageFolder(DatasetFolder):
175 """A generic data loader where the images are arranged in this way: ::
176
177 root/dog/xxx.png
178 root/dog/xxy.png
179 root/dog/xxz.png
180
181 root/cat/123.png
182 root/cat/nsdf3.png
183 root/cat/asd932_.png
184
185 Args:
186 root (string): Root directory path.
187 transform (callable, optional): A function/transform that takes in an PIL image
188 and returns a transformed version. E.g, ``transforms.RandomCrop``
189 target_transform (callable, optional): A function/transform that takes in the
190 target and transforms it.
191 loader (callable, optional): A function to load an image given its path.
192 is_valid_file (callable, optional): A function that takes path of an Image file
193 and check if the file is a valid file (used to check of corrupt files)
194
195 Attributes:
196 classes (list): List of the class names sorted alphabetically.
197 class_to_idx (dict): Dict with items (class_name, class_index).
198 imgs (list): List of (image path, class_index) tuples
199 """
200
201 def __init__(self, root, transform=None, target_transform=None,
202 loader=default_loader, is_valid_file=None):
203 super(ImageFolder, self).__init__(root, loader, IMG_EXTENSIONS if is_valid_file is None else None,
204 transform=transform,
205 target_transform=target_transform,
206 is_valid_file=is_valid_file)
207 self.imgs = self.samples
208
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchvision/datasets/folder.py b/torchvision/datasets/folder.py
--- a/torchvision/datasets/folder.py
+++ b/torchvision/datasets/folder.py
@@ -94,8 +94,10 @@
classes, class_to_idx = self._find_classes(self.root)
samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file)
if len(samples) == 0:
- raise (RuntimeError("Found 0 files in subfolders of: " + self.root + "\n"
- "Supported extensions are: " + ",".join(extensions)))
+ msg = "Found 0 files in subfolders of: {}\n".format(self.root)
+ if extensions is not None:
+ msg += "Supported extensions are: {}".format(",".join(extensions))
+ raise RuntimeError(msg)
self.loader = loader
self.extensions = extensions
| {"golden_diff": "diff --git a/torchvision/datasets/folder.py b/torchvision/datasets/folder.py\n--- a/torchvision/datasets/folder.py\n+++ b/torchvision/datasets/folder.py\n@@ -94,8 +94,10 @@\n classes, class_to_idx = self._find_classes(self.root)\n samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file)\n if len(samples) == 0:\n- raise (RuntimeError(\"Found 0 files in subfolders of: \" + self.root + \"\\n\"\n- \"Supported extensions are: \" + \",\".join(extensions)))\n+ msg = \"Found 0 files in subfolders of: {}\\n\".format(self.root)\n+ if extensions is not None:\n+ msg += \"Supported extensions are: {}\".format(\",\".join(extensions))\n+ raise RuntimeError(msg)\n \n self.loader = loader\n self.extensions = extensions\n", "issue": "Imagefolder none descriptive error message\n## \ud83d\udc1b Bug\r\n\r\nYou get an weird python error:\r\n`TypeError: can only join an iterable`\r\n\r\nIf you make an Imagefolder dataset with no samples in the directory and a is_valid_file. Because the extensions variable is set to none because is_valid_file which causes the TypeError.\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. Use Imagefolder in torchvision.datasets with no samples in the directory and a is_valid_file function.\r\n\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n```\r\n is_type = lambda x: x.startswith(name)\r\n print(dir_in)\r\n data_set = datasets.ImageFolder(root=dir_in,\r\n transform=transforms.Compose([transforms.CenterCrop(IMG_SIZE),\r\n # transforms.Resize(IMAGE_SIZE),\r\n transforms.ToTensor(),\r\n transforms.Normalize(0.5, 0.5)]),\r\n is_valid_file=is_type)\r\n```\r\n\r\n## Expected behavior\r\n\r\nShould display an RunTimeError saying that no samples were found.\r\n\r\n## Environment\r\n\r\nPlease copy and paste the output from our\r\n[environment collection script](https://raw.githubusercontent.com/pytorch/pytorch/master/torch/utils/collect_env.py)\r\n(or fill out the checklist below manually).\r\nPyTorch version: 1.5.0\r\nIs debug build: No\r\nCUDA used to build PyTorch: 10.2\r\n\r\nOS: Manjaro Linux\r\nGCC version: (Arch Linux 9.3.0-1) 9.3.0\r\nCMake version: version 3.17.1\r\n\r\nPython version: 3.8\r\nIs CUDA available: Yes\r\nCUDA runtime version: 10.0.130\r\nGPU models and configuration: GPU 0: Quadro M1200\r\nNvidia driver version: 440.82\r\ncuDNN version: /usr/lib/libcudnn.so.7.6.5\r\n\r\nVersions of relevant libraries:\r\n[pip3] numpy==1.18.3\r\n[pip3] torch==1.5.0\r\n[pip3] torch-utils==0.1.2\r\n[pip3] torchvision==0.5.0\r\n[conda] Could not collect\n", "before_files": [{"content": "from .vision import VisionDataset\n\nfrom PIL import Image\n\nimport os\nimport os.path\n\n\ndef has_file_allowed_extension(filename, extensions):\n \"\"\"Checks if a file is an allowed extension.\n\n Args:\n filename (string): path to a file\n extensions (tuple of strings): extensions to consider (lowercase)\n\n Returns:\n bool: True if the filename ends with one of given extensions\n \"\"\"\n return filename.lower().endswith(extensions)\n\n\ndef is_image_file(filename):\n \"\"\"Checks if a file is an allowed image extension.\n\n Args:\n filename (string): path to a file\n\n Returns:\n bool: True if the filename ends with a known image extension\n \"\"\"\n return has_file_allowed_extension(filename, IMG_EXTENSIONS)\n\n\ndef make_dataset(directory, class_to_idx, extensions=None, is_valid_file=None):\n instances = []\n directory = os.path.expanduser(directory)\n both_none = extensions is 
None and is_valid_file is None\n both_something = extensions is not None and is_valid_file is not None\n if both_none or both_something:\n raise ValueError(\"Both extensions and is_valid_file cannot be None or not None at the same time\")\n if extensions is not None:\n def is_valid_file(x):\n return has_file_allowed_extension(x, extensions)\n for target_class in sorted(class_to_idx.keys()):\n class_index = class_to_idx[target_class]\n target_dir = os.path.join(directory, target_class)\n if not os.path.isdir(target_dir):\n continue\n for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):\n for fname in sorted(fnames):\n path = os.path.join(root, fname)\n if is_valid_file(path):\n item = path, class_index\n instances.append(item)\n return instances\n\n\nclass DatasetFolder(VisionDataset):\n \"\"\"A generic data loader where the samples are arranged in this way: ::\n\n root/class_x/xxx.ext\n root/class_x/xxy.ext\n root/class_x/xxz.ext\n\n root/class_y/123.ext\n root/class_y/nsdf3.ext\n root/class_y/asd932_.ext\n\n Args:\n root (string): Root directory path.\n loader (callable): A function to load a sample given its path.\n extensions (tuple[string]): A list of allowed extensions.\n both extensions and is_valid_file should not be passed.\n transform (callable, optional): A function/transform that takes in\n a sample and returns a transformed version.\n E.g, ``transforms.RandomCrop`` for images.\n target_transform (callable, optional): A function/transform that takes\n in the target and transforms it.\n is_valid_file (callable, optional): A function that takes path of a file\n and check if the file is a valid file (used to check of corrupt files)\n both extensions and is_valid_file should not be passed.\n\n Attributes:\n classes (list): List of the class names sorted alphabetically.\n class_to_idx (dict): Dict with items (class_name, class_index).\n samples (list): List of (sample path, class_index) tuples\n targets (list): The class_index value for each image in the dataset\n \"\"\"\n\n def __init__(self, root, loader, extensions=None, transform=None,\n target_transform=None, is_valid_file=None):\n super(DatasetFolder, self).__init__(root, transform=transform,\n target_transform=target_transform)\n classes, class_to_idx = self._find_classes(self.root)\n samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file)\n if len(samples) == 0:\n raise (RuntimeError(\"Found 0 files in subfolders of: \" + self.root + \"\\n\"\n \"Supported extensions are: \" + \",\".join(extensions)))\n\n self.loader = loader\n self.extensions = extensions\n\n self.classes = classes\n self.class_to_idx = class_to_idx\n self.samples = samples\n self.targets = [s[1] for s in samples]\n\n def _find_classes(self, dir):\n \"\"\"\n Finds the class folders in a dataset.\n\n Args:\n dir (string): Root directory path.\n\n Returns:\n tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.\n\n Ensures:\n No class is a subdirectory of another.\n \"\"\"\n classes = [d.name for d in os.scandir(dir) if d.is_dir()]\n classes.sort()\n class_to_idx = {classes[i]: i for i in range(len(classes))}\n return classes, class_to_idx\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (sample, target) where target is class_index of the target class.\n \"\"\"\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform is not None:\n sample = self.transform(sample)\n if self.target_transform is not None:\n 
target = self.target_transform(target)\n\n return sample, target\n\n def __len__(self):\n return len(self.samples)\n\n\nIMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')\n\n\ndef pil_loader(path):\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n img = Image.open(f)\n return img.convert('RGB')\n\n\ndef accimage_loader(path):\n import accimage\n try:\n return accimage.Image(path)\n except IOError:\n # Potentially a decoding problem, fall back to PIL.Image\n return pil_loader(path)\n\n\ndef default_loader(path):\n from torchvision import get_image_backend\n if get_image_backend() == 'accimage':\n return accimage_loader(path)\n else:\n return pil_loader(path)\n\n\nclass ImageFolder(DatasetFolder):\n \"\"\"A generic data loader where the images are arranged in this way: ::\n\n root/dog/xxx.png\n root/dog/xxy.png\n root/dog/xxz.png\n\n root/cat/123.png\n root/cat/nsdf3.png\n root/cat/asd932_.png\n\n Args:\n root (string): Root directory path.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n loader (callable, optional): A function to load an image given its path.\n is_valid_file (callable, optional): A function that takes path of an Image file\n and check if the file is a valid file (used to check of corrupt files)\n\n Attributes:\n classes (list): List of the class names sorted alphabetically.\n class_to_idx (dict): Dict with items (class_name, class_index).\n imgs (list): List of (image path, class_index) tuples\n \"\"\"\n\n def __init__(self, root, transform=None, target_transform=None,\n loader=default_loader, is_valid_file=None):\n super(ImageFolder, self).__init__(root, loader, IMG_EXTENSIONS if is_valid_file is None else None,\n transform=transform,\n target_transform=target_transform,\n is_valid_file=is_valid_file)\n self.imgs = self.samples\n", "path": "torchvision/datasets/folder.py"}], "after_files": [{"content": "from .vision import VisionDataset\n\nfrom PIL import Image\n\nimport os\nimport os.path\n\n\ndef has_file_allowed_extension(filename, extensions):\n \"\"\"Checks if a file is an allowed extension.\n\n Args:\n filename (string): path to a file\n extensions (tuple of strings): extensions to consider (lowercase)\n\n Returns:\n bool: True if the filename ends with one of given extensions\n \"\"\"\n return filename.lower().endswith(extensions)\n\n\ndef is_image_file(filename):\n \"\"\"Checks if a file is an allowed image extension.\n\n Args:\n filename (string): path to a file\n\n Returns:\n bool: True if the filename ends with a known image extension\n \"\"\"\n return has_file_allowed_extension(filename, IMG_EXTENSIONS)\n\n\ndef make_dataset(directory, class_to_idx, extensions=None, is_valid_file=None):\n instances = []\n directory = os.path.expanduser(directory)\n both_none = extensions is None and is_valid_file is None\n both_something = extensions is not None and is_valid_file is not None\n if both_none or both_something:\n raise ValueError(\"Both extensions and is_valid_file cannot be None or not None at the same time\")\n if extensions is not None:\n def is_valid_file(x):\n return has_file_allowed_extension(x, extensions)\n for target_class in sorted(class_to_idx.keys()):\n class_index = class_to_idx[target_class]\n target_dir = 
os.path.join(directory, target_class)\n if not os.path.isdir(target_dir):\n continue\n for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):\n for fname in sorted(fnames):\n path = os.path.join(root, fname)\n if is_valid_file(path):\n item = path, class_index\n instances.append(item)\n return instances\n\n\nclass DatasetFolder(VisionDataset):\n \"\"\"A generic data loader where the samples are arranged in this way: ::\n\n root/class_x/xxx.ext\n root/class_x/xxy.ext\n root/class_x/xxz.ext\n\n root/class_y/123.ext\n root/class_y/nsdf3.ext\n root/class_y/asd932_.ext\n\n Args:\n root (string): Root directory path.\n loader (callable): A function to load a sample given its path.\n extensions (tuple[string]): A list of allowed extensions.\n both extensions and is_valid_file should not be passed.\n transform (callable, optional): A function/transform that takes in\n a sample and returns a transformed version.\n E.g, ``transforms.RandomCrop`` for images.\n target_transform (callable, optional): A function/transform that takes\n in the target and transforms it.\n is_valid_file (callable, optional): A function that takes path of a file\n and check if the file is a valid file (used to check of corrupt files)\n both extensions and is_valid_file should not be passed.\n\n Attributes:\n classes (list): List of the class names sorted alphabetically.\n class_to_idx (dict): Dict with items (class_name, class_index).\n samples (list): List of (sample path, class_index) tuples\n targets (list): The class_index value for each image in the dataset\n \"\"\"\n\n def __init__(self, root, loader, extensions=None, transform=None,\n target_transform=None, is_valid_file=None):\n super(DatasetFolder, self).__init__(root, transform=transform,\n target_transform=target_transform)\n classes, class_to_idx = self._find_classes(self.root)\n samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file)\n if len(samples) == 0:\n msg = \"Found 0 files in subfolders of: {}\\n\".format(self.root)\n if extensions is not None:\n msg += \"Supported extensions are: {}\".format(\",\".join(extensions))\n raise RuntimeError(msg)\n\n self.loader = loader\n self.extensions = extensions\n\n self.classes = classes\n self.class_to_idx = class_to_idx\n self.samples = samples\n self.targets = [s[1] for s in samples]\n\n def _find_classes(self, dir):\n \"\"\"\n Finds the class folders in a dataset.\n\n Args:\n dir (string): Root directory path.\n\n Returns:\n tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.\n\n Ensures:\n No class is a subdirectory of another.\n \"\"\"\n classes = [d.name for d in os.scandir(dir) if d.is_dir()]\n classes.sort()\n class_to_idx = {classes[i]: i for i in range(len(classes))}\n return classes, class_to_idx\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (sample, target) where target is class_index of the target class.\n \"\"\"\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform is not None:\n sample = self.transform(sample)\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return sample, target\n\n def __len__(self):\n return len(self.samples)\n\n\nIMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')\n\n\ndef pil_loader(path):\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n img = Image.open(f)\n 
return img.convert('RGB')\n\n\ndef accimage_loader(path):\n import accimage\n try:\n return accimage.Image(path)\n except IOError:\n # Potentially a decoding problem, fall back to PIL.Image\n return pil_loader(path)\n\n\ndef default_loader(path):\n from torchvision import get_image_backend\n if get_image_backend() == 'accimage':\n return accimage_loader(path)\n else:\n return pil_loader(path)\n\n\nclass ImageFolder(DatasetFolder):\n \"\"\"A generic data loader where the images are arranged in this way: ::\n\n root/dog/xxx.png\n root/dog/xxy.png\n root/dog/xxz.png\n\n root/cat/123.png\n root/cat/nsdf3.png\n root/cat/asd932_.png\n\n Args:\n root (string): Root directory path.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n loader (callable, optional): A function to load an image given its path.\n is_valid_file (callable, optional): A function that takes path of an Image file\n and check if the file is a valid file (used to check of corrupt files)\n\n Attributes:\n classes (list): List of the class names sorted alphabetically.\n class_to_idx (dict): Dict with items (class_name, class_index).\n imgs (list): List of (image path, class_index) tuples\n \"\"\"\n\n def __init__(self, root, transform=None, target_transform=None,\n loader=default_loader, is_valid_file=None):\n super(ImageFolder, self).__init__(root, loader, IMG_EXTENSIONS if is_valid_file is None else None,\n transform=transform,\n target_transform=target_transform,\n is_valid_file=is_valid_file)\n self.imgs = self.samples\n", "path": "torchvision/datasets/folder.py"}]} | 2,890 | 202 |
gh_patches_debug_13934 | rasdani/github-patches | git_diff | saulpw__visidata-1813 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Feature request: Provide command `reopen-closed-sheet` like Chrome's Ctrl-Shift-T
It often happens that I close a sheet by mistake and immediately curse because I've already done a bunch of manipulations on this tab that I'll now have to repeat. (I know I can guard a sheet but that doesn't fit my workflow.)
I wish VisiData had a feature like Chrome's Ctrl-Shift-T: basically a command that reopens the sheet that was closed most recently, with all the manipulations that I've done to it.
Of course, this command is challenging because it means VisiData has to keep this information around. Maybe limit it to the last 3 sheets.
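A minimal sketch of the idea (plain Python; the hook point and helper names below are assumptions for illustration, not VisiData's real internals — only `vd.push` and `vd.fail` appear in the code shown below):

```python
from collections import deque

recently_closed = deque(maxlen=3)  # remember only the last 3 closed sheets


def remember_closed(sheet):
    """Hypothetical hook: call this wherever a sheet actually gets quit/closed."""
    recently_closed.append(sheet)


def reopen_last_closed(vd):
    """Hypothetical command body for a `reopen-closed-sheet` binding."""
    if recently_closed:
        vd.push(recently_closed.pop())  # restore the most recently closed sheet
    else:
        vd.fail('no recently closed sheets')
```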
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `visidata/_open.py`
Content:
```
1 import os
2 import os.path
3
4 from visidata import VisiData, vd, Path, BaseSheet, TableSheet, TextSheet, SettableColumn
5
6
7 vd.option('filetype', '', 'specify file type', replay=True)
8
9
10 @VisiData.api
11 def inputFilename(vd, prompt, *args, **kwargs):
12 return vd.input(prompt, type="filename", *args, completer=_completeFilename, **kwargs).strip()
13
14
15 @VisiData.api
16 def inputPath(vd, *args, **kwargs):
17 return Path(vd.inputFilename(*args, **kwargs))
18
19
20 def _completeFilename(val, state):
21 i = val.rfind('/')
22 if i < 0: # no /
23 base = ''
24 partial = val
25 elif i == 0: # root /
26 base = '/'
27 partial = val[1:]
28 else:
29 base = val[:i]
30 partial = val[i+1:]
31
32 files = []
33 for f in os.listdir(Path(base or '.')):
34 if f.startswith(partial):
35 files.append(os.path.join(base, f))
36
37 files.sort()
38 return files[state%len(files)]
39
40 @VisiData.api
41 def guessFiletype(vd, p):
42 '''Call all vd.guess_<filetype>(p) functions and return best candidate sheet based on file contents.'''
43
44 guessfuncs = [getattr(vd, x) for x in dir(vd) if x.startswith('guess_')]
45 filetypes = []
46 for f in guessfuncs:
47 try:
48 filetype = f(p)
49 if filetype:
50 filetype['_guesser'] = f.__name__
51 filetypes.append(filetype)
52 except FileNotFoundError:
53 pass
54 except Exception as e:
55 vd.debug(f'{f.__name__}: {e}')
56
57 if filetypes:
58 return sorted(filetypes, key=lambda r: -r.get('_likelihood', 1))[0]
59
60
61 @VisiData.api
62 def guess_extension(vd, path):
63 # try auto-detect from extension
64 ext = path.suffix[1:].lower()
65 openfunc = getattr(vd, f'open_{ext}', vd.getGlobals().get(f'open_{ext}'))
66 if openfunc:
67 return dict(filetype=ext, _likelihood=3)
68
69
70 @VisiData.api
71 def openPath(vd, p, filetype=None, create=False):
72 '''Call ``open_<filetype>(p)`` or ``openurl_<p.scheme>(p, filetype)``. Return constructed but unloaded sheet of appropriate type.
73 If True, *create* will return a new, blank **Sheet** if file does not exist.'''
74 if p.scheme and not p.has_fp():
75 schemes = p.scheme.split('+')
76 openfuncname = 'openurl_' + schemes[-1]
77
78 openfunc = getattr(vd, openfuncname, None) or vd.getGlobals().get(openfuncname, None)
79 if not openfunc:
80 vd.fail(f'no loader for url scheme: {p.scheme}')
81
82 return openfunc(p, filetype=filetype)
83
84 if not p.exists() and not create:
85 return None
86
87 if not filetype:
88 filetype = p.ext or vd.options.filetype
89
90 filetype = filetype.lower()
91
92 if not p.exists():
93 if not create:
94 return None
95 newfunc = getattr(vd, 'new_' + filetype, vd.getGlobals().get('new_' + filetype))
96 if not newfunc:
97 vd.warning('%s does not exist, creating new sheet' % p)
98 return vd.newSheet(p.name, 1, source=p)
99
100 vd.status('creating blank %s' % (p.given))
101 return newfunc(p)
102
103 openfuncname = 'open_' + filetype
104 openfunc = getattr(vd, openfuncname, vd.getGlobals().get(openfuncname))
105 if not openfunc:
106 opts = vd.guessFiletype(p)
107 if opts and 'filetype' in opts:
108 filetype = opts['filetype']
109 openfuncname = 'open_' + filetype
110 openfunc = getattr(vd, openfuncname, vd.getGlobals().get(openfuncname))
111 if not openfunc:
112 vd.error(f'guessed {filetype} but no {openfuncname}')
113
114 vs = openfunc(p)
115 for k, v in opts.items():
116 if k != 'filetype' and not k.startswith('_'):
117 setattr(vs.options, k, v)
118 vd.warning('guessed "%s" filetype based on contents' % opts['filetype'])
119 return vs
120
121 vd.warning('unknown "%s" filetype' % filetype)
122
123 filetype = 'txt'
124 openfunc = vd.open_txt
125
126 vd.status('opening %s as %s' % (p.given, filetype))
127
128 return openfunc(p)
129
130
131 @VisiData.api
132 def openSource(vd, p, filetype=None, create=False, **kwargs):
133 '''Return unloaded sheet object for *p* opened as the given *filetype* and with *kwargs* as option overrides. *p* can be a Path or a string (filename, url, or "-" for stdin).
134 when true, *create* will return a blank sheet, if file does not exist.'''
135
136 if isinstance(p, BaseSheet):
137 return p
138
139 filetype = filetype or vd.options.getonly('filetype', str(p), '') #1710
140 filetype = filetype or vd.options.getonly('filetype', 'global', '')
141
142 vs = None
143 if isinstance(p, str):
144 if '://' in p:
145 vs = vd.openPath(Path(p), filetype=filetype) # convert to Path and recurse
146 elif p == '-':
147 vs = vd.openPath(vd.stdinSource, filetype=filetype)
148 else:
149 vs = vd.openPath(Path(p), filetype=filetype, create=create) # convert to Path and recurse
150 else:
151 vs = vd.openPath(p, filetype=filetype, create=create)
152
153 for optname, optval in kwargs.items():
154 vs.options[optname] = optval
155
156 return vs
157
158
159 #### enable external addons
160 @VisiData.api
161 def open_txt(vd, p):
162 'Create sheet from `.txt` file at Path `p`, checking whether it is TSV.'
163 if p.exists(): #1611
164 with p.open_text(encoding=vd.options.encoding) as fp:
165 delimiter = vd.options.delimiter
166 try:
167 if delimiter and delimiter in next(fp): # peek at the first line
168 return vd.open_tsv(p) # TSV often have .txt extension
169 except StopIteration:
170 return TableSheet(p.name, columns=[SettableColumn()], source=p)
171 return TextSheet(p.name, source=p)
172
173
174 @VisiData.api
175 def loadInternalSheet(vd, cls, p, **kwargs):
176 'Load internal sheet of given class.'
177 vs = cls(p.name, source=p, **kwargs)
178 vd.options._set('encoding', 'utf8', vs)
179 if p.exists():
180 # vd.sheets.insert(0, vs) # broke replay with macros.reload()
181 vs.reload.__wrapped__(vs)
182 # vd.sheets.pop(0)
183 return vs
184
185
186 BaseSheet.addCommand('o', 'open-file', 'vd.push(openSource(inputFilename("open: "), create=True))', 'Open file or URL')
187 TableSheet.addCommand('zo', 'open-cell-file', 'vd.push(openSource(cursorDisplay) or fail(f"file {cursorDisplay} does not exist"))', 'Open file or URL from path in current cell')
188
189 vd.addMenuItems('''
190 File > Open file/url > open-file
191 ''')
192
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/visidata/_open.py b/visidata/_open.py
--- a/visidata/_open.py
+++ b/visidata/_open.py
@@ -185,7 +185,9 @@
BaseSheet.addCommand('o', 'open-file', 'vd.push(openSource(inputFilename("open: "), create=True))', 'Open file or URL')
TableSheet.addCommand('zo', 'open-cell-file', 'vd.push(openSource(cursorDisplay) or fail(f"file {cursorDisplay} does not exist"))', 'Open file or URL from path in current cell')
+BaseSheet.addCommand('gU', 'undo-last-quit', 'push(allSheets[-1])', 'reopen most recently closed sheet')
vd.addMenuItems('''
File > Open file/url > open-file
+ File > Reopen last closed > undo-last-quit
''')
| {"golden_diff": "diff --git a/visidata/_open.py b/visidata/_open.py\n--- a/visidata/_open.py\n+++ b/visidata/_open.py\n@@ -185,7 +185,9 @@\n \n BaseSheet.addCommand('o', 'open-file', 'vd.push(openSource(inputFilename(\"open: \"), create=True))', 'Open file or URL')\n TableSheet.addCommand('zo', 'open-cell-file', 'vd.push(openSource(cursorDisplay) or fail(f\"file {cursorDisplay} does not exist\"))', 'Open file or URL from path in current cell')\n+BaseSheet.addCommand('gU', 'undo-last-quit', 'push(allSheets[-1])', 'reopen most recently closed sheet')\n \n vd.addMenuItems('''\n File > Open file/url > open-file\n+ File > Reopen last closed > undo-last-quit\n ''')\n", "issue": "Feature request: Provide command `reopen-closed-sheet` like Chrome's Ctrl-Shift-T\nIt often happens that I close a sheet by mistake and immediately curse because I've already done a bunch of manipulations on this tab that I'll now have to repeat. (I know I can guard a sheet but that doesn't fit my workflow.)\r\n\r\nI wish VisiData had a feature like Chrome's Ctrl-Shift-T. Basically a command that just opens that sheet that was closed most recently, with all the manipulations that I've done to it. \r\n\r\nOf course, this command is challenging because it means VisiData has to keep this information around. Maybe limit it to last 3 sheets.\n", "before_files": [{"content": "import os\nimport os.path\n\nfrom visidata import VisiData, vd, Path, BaseSheet, TableSheet, TextSheet, SettableColumn\n\n\nvd.option('filetype', '', 'specify file type', replay=True)\n\n\[email protected]\ndef inputFilename(vd, prompt, *args, **kwargs):\n return vd.input(prompt, type=\"filename\", *args, completer=_completeFilename, **kwargs).strip()\n\n\[email protected]\ndef inputPath(vd, *args, **kwargs):\n return Path(vd.inputFilename(*args, **kwargs))\n\n\ndef _completeFilename(val, state):\n i = val.rfind('/')\n if i < 0: # no /\n base = ''\n partial = val\n elif i == 0: # root /\n base = '/'\n partial = val[1:]\n else:\n base = val[:i]\n partial = val[i+1:]\n\n files = []\n for f in os.listdir(Path(base or '.')):\n if f.startswith(partial):\n files.append(os.path.join(base, f))\n\n files.sort()\n return files[state%len(files)]\n\[email protected]\ndef guessFiletype(vd, p):\n '''Call all vd.guess_<filetype>(p) functions and return best candidate sheet based on file contents.'''\n\n guessfuncs = [getattr(vd, x) for x in dir(vd) if x.startswith('guess_')]\n filetypes = []\n for f in guessfuncs:\n try:\n filetype = f(p)\n if filetype:\n filetype['_guesser'] = f.__name__\n filetypes.append(filetype)\n except FileNotFoundError:\n pass\n except Exception as e:\n vd.debug(f'{f.__name__}: {e}')\n\n if filetypes:\n return sorted(filetypes, key=lambda r: -r.get('_likelihood', 1))[0]\n\n\[email protected]\ndef guess_extension(vd, path):\n # try auto-detect from extension\n ext = path.suffix[1:].lower()\n openfunc = getattr(vd, f'open_{ext}', vd.getGlobals().get(f'open_{ext}'))\n if openfunc:\n return dict(filetype=ext, _likelihood=3)\n\n\[email protected]\ndef openPath(vd, p, filetype=None, create=False):\n '''Call ``open_<filetype>(p)`` or ``openurl_<p.scheme>(p, filetype)``. 
Return constructed but unloaded sheet of appropriate type.\n If True, *create* will return a new, blank **Sheet** if file does not exist.'''\n if p.scheme and not p.has_fp():\n schemes = p.scheme.split('+')\n openfuncname = 'openurl_' + schemes[-1]\n\n openfunc = getattr(vd, openfuncname, None) or vd.getGlobals().get(openfuncname, None)\n if not openfunc:\n vd.fail(f'no loader for url scheme: {p.scheme}')\n\n return openfunc(p, filetype=filetype)\n\n if not p.exists() and not create:\n return None\n\n if not filetype:\n filetype = p.ext or vd.options.filetype\n\n filetype = filetype.lower()\n\n if not p.exists():\n if not create:\n return None\n newfunc = getattr(vd, 'new_' + filetype, vd.getGlobals().get('new_' + filetype))\n if not newfunc:\n vd.warning('%s does not exist, creating new sheet' % p)\n return vd.newSheet(p.name, 1, source=p)\n\n vd.status('creating blank %s' % (p.given))\n return newfunc(p)\n\n openfuncname = 'open_' + filetype\n openfunc = getattr(vd, openfuncname, vd.getGlobals().get(openfuncname))\n if not openfunc:\n opts = vd.guessFiletype(p)\n if opts and 'filetype' in opts:\n filetype = opts['filetype']\n openfuncname = 'open_' + filetype\n openfunc = getattr(vd, openfuncname, vd.getGlobals().get(openfuncname))\n if not openfunc:\n vd.error(f'guessed {filetype} but no {openfuncname}')\n\n vs = openfunc(p)\n for k, v in opts.items():\n if k != 'filetype' and not k.startswith('_'):\n setattr(vs.options, k, v)\n vd.warning('guessed \"%s\" filetype based on contents' % opts['filetype'])\n return vs\n\n vd.warning('unknown \"%s\" filetype' % filetype)\n\n filetype = 'txt'\n openfunc = vd.open_txt\n\n vd.status('opening %s as %s' % (p.given, filetype))\n\n return openfunc(p)\n\n\[email protected]\ndef openSource(vd, p, filetype=None, create=False, **kwargs):\n '''Return unloaded sheet object for *p* opened as the given *filetype* and with *kwargs* as option overrides. 
*p* can be a Path or a string (filename, url, or \"-\" for stdin).\n when true, *create* will return a blank sheet, if file does not exist.'''\n\n if isinstance(p, BaseSheet):\n return p\n\n filetype = filetype or vd.options.getonly('filetype', str(p), '') #1710\n filetype = filetype or vd.options.getonly('filetype', 'global', '')\n\n vs = None\n if isinstance(p, str):\n if '://' in p:\n vs = vd.openPath(Path(p), filetype=filetype) # convert to Path and recurse\n elif p == '-':\n vs = vd.openPath(vd.stdinSource, filetype=filetype)\n else:\n vs = vd.openPath(Path(p), filetype=filetype, create=create) # convert to Path and recurse\n else:\n vs = vd.openPath(p, filetype=filetype, create=create)\n\n for optname, optval in kwargs.items():\n vs.options[optname] = optval\n\n return vs\n\n\n#### enable external addons\[email protected]\ndef open_txt(vd, p):\n 'Create sheet from `.txt` file at Path `p`, checking whether it is TSV.'\n if p.exists(): #1611\n with p.open_text(encoding=vd.options.encoding) as fp:\n delimiter = vd.options.delimiter\n try:\n if delimiter and delimiter in next(fp): # peek at the first line\n return vd.open_tsv(p) # TSV often have .txt extension\n except StopIteration:\n return TableSheet(p.name, columns=[SettableColumn()], source=p)\n return TextSheet(p.name, source=p)\n\n\[email protected]\ndef loadInternalSheet(vd, cls, p, **kwargs):\n 'Load internal sheet of given class.'\n vs = cls(p.name, source=p, **kwargs)\n vd.options._set('encoding', 'utf8', vs)\n if p.exists():\n# vd.sheets.insert(0, vs) # broke replay with macros.reload()\n vs.reload.__wrapped__(vs)\n# vd.sheets.pop(0)\n return vs\n\n\nBaseSheet.addCommand('o', 'open-file', 'vd.push(openSource(inputFilename(\"open: \"), create=True))', 'Open file or URL')\nTableSheet.addCommand('zo', 'open-cell-file', 'vd.push(openSource(cursorDisplay) or fail(f\"file {cursorDisplay} does not exist\"))', 'Open file or URL from path in current cell')\n\nvd.addMenuItems('''\n File > Open file/url > open-file\n''')\n", "path": "visidata/_open.py"}], "after_files": [{"content": "import os\nimport os.path\n\nfrom visidata import VisiData, vd, Path, BaseSheet, TableSheet, TextSheet, SettableColumn\n\n\nvd.option('filetype', '', 'specify file type', replay=True)\n\n\[email protected]\ndef inputFilename(vd, prompt, *args, **kwargs):\n return vd.input(prompt, type=\"filename\", *args, completer=_completeFilename, **kwargs).strip()\n\n\[email protected]\ndef inputPath(vd, *args, **kwargs):\n return Path(vd.inputFilename(*args, **kwargs))\n\n\ndef _completeFilename(val, state):\n i = val.rfind('/')\n if i < 0: # no /\n base = ''\n partial = val\n elif i == 0: # root /\n base = '/'\n partial = val[1:]\n else:\n base = val[:i]\n partial = val[i+1:]\n\n files = []\n for f in os.listdir(Path(base or '.')):\n if f.startswith(partial):\n files.append(os.path.join(base, f))\n\n files.sort()\n return files[state%len(files)]\n\[email protected]\ndef guessFiletype(vd, p):\n '''Call all vd.guess_<filetype>(p) functions and return best candidate sheet based on file contents.'''\n\n guessfuncs = [getattr(vd, x) for x in dir(vd) if x.startswith('guess_')]\n filetypes = []\n for f in guessfuncs:\n try:\n filetype = f(p)\n if filetype:\n filetype['_guesser'] = f.__name__\n filetypes.append(filetype)\n except FileNotFoundError:\n pass\n except Exception as e:\n vd.debug(f'{f.__name__}: {e}')\n\n if filetypes:\n return sorted(filetypes, key=lambda r: -r.get('_likelihood', 1))[0]\n\n\[email protected]\ndef guess_extension(vd, path):\n # try auto-detect from 
extension\n ext = path.suffix[1:].lower()\n openfunc = getattr(vd, f'open_{ext}', vd.getGlobals().get(f'open_{ext}'))\n if openfunc:\n return dict(filetype=ext, _likelihood=3)\n\n\[email protected]\ndef openPath(vd, p, filetype=None, create=False):\n '''Call ``open_<filetype>(p)`` or ``openurl_<p.scheme>(p, filetype)``. Return constructed but unloaded sheet of appropriate type.\n If True, *create* will return a new, blank **Sheet** if file does not exist.'''\n if p.scheme and not p.has_fp():\n schemes = p.scheme.split('+')\n openfuncname = 'openurl_' + schemes[-1]\n\n openfunc = getattr(vd, openfuncname, None) or vd.getGlobals().get(openfuncname, None)\n if not openfunc:\n vd.fail(f'no loader for url scheme: {p.scheme}')\n\n return openfunc(p, filetype=filetype)\n\n if not p.exists() and not create:\n return None\n\n if not filetype:\n filetype = p.ext or vd.options.filetype\n\n filetype = filetype.lower()\n\n if not p.exists():\n if not create:\n return None\n newfunc = getattr(vd, 'new_' + filetype, vd.getGlobals().get('new_' + filetype))\n if not newfunc:\n vd.warning('%s does not exist, creating new sheet' % p)\n return vd.newSheet(p.name, 1, source=p)\n\n vd.status('creating blank %s' % (p.given))\n return newfunc(p)\n\n openfuncname = 'open_' + filetype\n openfunc = getattr(vd, openfuncname, vd.getGlobals().get(openfuncname))\n if not openfunc:\n opts = vd.guessFiletype(p)\n if opts and 'filetype' in opts:\n filetype = opts['filetype']\n openfuncname = 'open_' + filetype\n openfunc = getattr(vd, openfuncname, vd.getGlobals().get(openfuncname))\n if not openfunc:\n vd.error(f'guessed {filetype} but no {openfuncname}')\n\n vs = openfunc(p)\n for k, v in opts.items():\n if k != 'filetype' and not k.startswith('_'):\n setattr(vs.options, k, v)\n vd.warning('guessed \"%s\" filetype based on contents' % opts['filetype'])\n return vs\n\n vd.warning('unknown \"%s\" filetype' % filetype)\n\n filetype = 'txt'\n openfunc = vd.open_txt\n\n vd.status('opening %s as %s' % (p.given, filetype))\n\n return openfunc(p)\n\n\[email protected]\ndef openSource(vd, p, filetype=None, create=False, **kwargs):\n '''Return unloaded sheet object for *p* opened as the given *filetype* and with *kwargs* as option overrides. 
*p* can be a Path or a string (filename, url, or \"-\" for stdin).\n when true, *create* will return a blank sheet, if file does not exist.'''\n\n if isinstance(p, BaseSheet):\n return p\n\n filetype = filetype or vd.options.getonly('filetype', str(p), '') #1710\n filetype = filetype or vd.options.getonly('filetype', 'global', '')\n\n vs = None\n if isinstance(p, str):\n if '://' in p:\n vs = vd.openPath(Path(p), filetype=filetype) # convert to Path and recurse\n elif p == '-':\n vs = vd.openPath(vd.stdinSource, filetype=filetype)\n else:\n vs = vd.openPath(Path(p), filetype=filetype, create=create) # convert to Path and recurse\n else:\n vs = vd.openPath(p, filetype=filetype, create=create)\n\n for optname, optval in kwargs.items():\n vs.options[optname] = optval\n\n return vs\n\n\n#### enable external addons\[email protected]\ndef open_txt(vd, p):\n 'Create sheet from `.txt` file at Path `p`, checking whether it is TSV.'\n if p.exists(): #1611\n with p.open_text(encoding=vd.options.encoding) as fp:\n delimiter = vd.options.delimiter\n try:\n if delimiter and delimiter in next(fp): # peek at the first line\n return vd.open_tsv(p) # TSV often have .txt extension\n except StopIteration:\n return TableSheet(p.name, columns=[SettableColumn()], source=p)\n return TextSheet(p.name, source=p)\n\n\[email protected]\ndef loadInternalSheet(vd, cls, p, **kwargs):\n 'Load internal sheet of given class.'\n vs = cls(p.name, source=p, **kwargs)\n vd.options._set('encoding', 'utf8', vs)\n if p.exists():\n# vd.sheets.insert(0, vs) # broke replay with macros.reload()\n vs.reload.__wrapped__(vs)\n# vd.sheets.pop(0)\n return vs\n\n\nBaseSheet.addCommand('o', 'open-file', 'vd.push(openSource(inputFilename(\"open: \"), create=True))', 'Open file or URL')\nTableSheet.addCommand('zo', 'open-cell-file', 'vd.push(openSource(cursorDisplay) or fail(f\"file {cursorDisplay} does not exist\"))', 'Open file or URL from path in current cell')\nBaseSheet.addCommand('gU', 'undo-last-quit', 'push(allSheets[-1])', 'reopen most recently closed sheet')\n\nvd.addMenuItems('''\n File > Open file/url > open-file\n File > Reopen last closed > undo-last-quit\n''')\n", "path": "visidata/_open.py"}]} | 2,540 | 192 |
gh_patches_debug_31882 | rasdani/github-patches | git_diff | cal-itp__benefits-191 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Enrollment page is accessible without passing eligibility verification
If a user knows the `/enrollment` endpoint and has configured their session with a Transit Agency, they can navigate to the credit card enrollment page and completely bypass the eligibility verification (EV) check. This is a serious bug!
## Steps to reproduce
1. Visit https://test-benefits.calitp.org
2. Click either of the Agency buttons
3. Manually update the URL to https://test-benefits.calitp.org/enrollment
4. Profit
## Remediation
This page must ensure the user has previously passed eligibility verification.
--- END ISSUE ---
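As a hedged sketch of the kind of guard the remediation calls for: the codebase below already enforces the agency check with a middleware plus `decorator_from_middleware`, so an analogous eligibility guard could live next to it in `benefits/core/middleware.py`. The helper name `session.eligibility` is borrowed from the enrollment view; adjust it to whatever the session module actually exposes.
```python
# Illustrative middleware, mirroring the existing AgencySessionRequired pattern.
# Assumes it sits in benefits/core/middleware.py so the relative import works.
from django.utils.deprecation import MiddlewareMixin

from . import session


class EligibleSessionRequired(MiddlewareMixin):
    """Reject requests whose session has not confirmed eligibility."""

    def process_request(self, request):
        if session.eligibility(request):
            return None
        raise AttributeError("Session has no confirmed eligibility")
```
The enrollment views would then be wrapped with `@decorator_from_middleware(EligibleSessionRequired)` instead of relying on the agency-only check.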
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benefits/core/middleware.py`
Content:
```
1 """
2 The core application: middleware definitions for request/response cycle.
3 """
4 import logging
5
6 from django.http import HttpResponse
7 from django.utils.decorators import decorator_from_middleware
8 from django.utils.deprecation import MiddlewareMixin
9 from django.views import i18n
10
11 from benefits.settings import DEBUG
12 from . import analytics, session
13
14
15 logger = logging.getLogger(__name__)
16
17
18 class AgencySessionRequired(MiddlewareMixin):
19 """Middleware raises an exception for sessions lacking an agency configuration."""
20
21 def process_request(self, request):
22 if session.active_agency(request):
23 logger.debug("Session configured with agency")
24 return None
25 else:
26 raise AttributeError("Session not configured with agency")
27
28
29 class DebugSession(MiddlewareMixin):
30 """Middleware to configure debug context in the request session."""
31
32 def process_request(self, request):
33 session.update(request, debug=DEBUG)
34 return None
35
36
37 class Healthcheck:
38 """Middleware intercepts and accepts /healthcheck requests."""
39
40 def __init__(self, get_response):
41 self.get_response = get_response
42
43 def __call__(self, request):
44 if request.path == "/healthcheck":
45 return HttpResponse("Healthy", content_type="text/plain")
46 return self.get_response(request)
47
48
49 class ViewedPageEvent(MiddlewareMixin):
50 """Middleware sends an analytics event for page views."""
51
52 def process_response(self, request, response):
53 event = analytics.ViewedPageEvent(request)
54 try:
55 analytics.send_event(event)
56 except Exception:
57 logger.warning(f"Failed to send event: {event}")
58 finally:
59 return response
60
61
62 pageview_decorator = decorator_from_middleware(ViewedPageEvent)
63
64
65 class ChangedLanguageEvent(MiddlewareMixin):
66 """Middleware hooks into django.views.i18n.set_language to send an analytics event."""
67
68 def process_view(self, request, view_func, view_args, view_kwargs):
69 if view_func == i18n.set_language:
70 new_lang = request.POST["language"]
71 event = analytics.ChangedLanguageEvent(request, new_lang)
72 analytics.send_event(event)
73 return None
74
```
Path: `benefits/enrollment/views.py`
Content:
```
1 """
2 The enrollment application: view definitions for the benefits enrollment flow.
3 """
4 import logging
5
6 from django.template.response import TemplateResponse
7 from django.urls import reverse
8 from django.utils.decorators import decorator_from_middleware
9 from django.utils.translation import pgettext, ugettext as _
10
11 from benefits.core import middleware, models, session, viewmodels
12 from benefits.core.views import PageTemplateResponse
13 from . import api, forms
14
15
16 logger = logging.getLogger(__name__)
17
18
19 def _check_access_token(request, agency):
20 """
21 Ensure the request's session is configured with an access token.
22 """
23 if not session.valid_token(request):
24 response = api.Client(agency).access_token()
25 session.update(request, token=response.access_token, token_exp=response.expiry)
26
27
28 def _index(request):
29 """Helper handles GET requests to enrollment index."""
30 agency = session.agency(request)
31
32 _check_access_token(request, agency)
33
34 tokenize_button = "tokenize_card"
35 tokenize_retry_form = forms.CardTokenizeFailForm("enrollment:retry")
36 tokenize_success_form = forms.CardTokenizeSuccessForm(auto_id=True, label_suffix="")
37
38 page = viewmodels.Page(
39 title=_("enrollment.index.title"),
40 content_title=_("enrollment.index.content_title"),
41 icon=viewmodels.Icon("idcardcheck", pgettext("image alt text", "core.icons.idcardcheck")),
42 paragraphs=[_("enrollment.index.p1"), _("enrollment.index.p2")],
43 classes="text-lg-center",
44 forms=[tokenize_retry_form, tokenize_success_form],
45 buttons=[
46 viewmodels.Button.primary(
47 text=_("enrollment.buttons.paymentpartner"), id=tokenize_button, url=f"#{tokenize_button}"
48 ),
49 viewmodels.Button.link(
50 classes="btn-sm", text=_("enrollment.buttons.paymentoptions"), url=reverse("core:payment_options")
51 ),
52 ],
53 )
54 context = {}
55 context.update(page.context_dict())
56
57 # add agency details
58 agency_vm = viewmodels.TransitAgency(agency)
59 context.update(agency_vm.context_dict())
60
61 # and payment processor details
62 processor_vm = viewmodels.PaymentProcessor(
63 model=agency.payment_processor,
64 access_token=session.token(request),
65 element_id=f"#{tokenize_button}",
66 color="#046b99",
67 name=f"{agency.long_name} {_('partnered with')} {agency.payment_processor.name}",
68 )
69 context.update(processor_vm.context_dict())
70 logger.warn(f"card_tokenize_url: {context['payment_processor'].card_tokenize_url}")
71
72 # the tokenize form URLs are injected to page-generated Javascript
73 context["forms"] = {
74 "tokenize_retry": reverse(tokenize_retry_form.action_url),
75 "tokenize_success": reverse(tokenize_success_form.action_url),
76 }
77
78 return TemplateResponse(request, "enrollment/index.html", context)
79
80
81 @decorator_from_middleware(middleware.AgencySessionRequired)
82 def index(request):
83 """View handler for the enrollment landing page."""
84 if request.method == "POST":
85 response = _enroll(request)
86 else:
87 response = _index(request)
88
89 return response
90
91
92 def _enroll(request):
93 """Helper calls the enrollment APIs."""
94 logger.debug("Read tokenized card")
95 form = forms.CardTokenizeSuccessForm(request.POST)
96 if not form.is_valid():
97 raise Exception("Invalid card token form")
98 card_token = form.cleaned_data.get("card_token")
99
100 eligibility = session.eligibility(request)
101 if eligibility:
102 logger.debug(f"Session contains an {models.EligibilityType.__name__}")
103 else:
104 raise Exception("Session contains no eligibility information")
105
106 agency = session.agency(request)
107
108 response = api.Client(agency).enroll(card_token, eligibility.group_id)
109
110 if response.success:
111 return success(request)
112 else:
113 raise Exception("Updated customer_id does not match enrolled customer_id")
114
115
116 @decorator_from_middleware(middleware.AgencySessionRequired)
117 def retry(request):
118 """View handler for a recoverable failure condition."""
119 if request.method == "POST":
120 form = forms.CardTokenizeFailForm(request.POST)
121 if form.is_valid():
122 agency = session.agency(request)
123 page = viewmodels.Page(
124 title=_("enrollment.retry.title"),
125 icon=viewmodels.Icon("bankcardquestion", pgettext("image alt text", "core.icons.bankcardquestion")),
126 content_title=_("enrollment.retry.title"),
127 paragraphs=[_("enrollment.retry.p1")],
128 buttons=viewmodels.Button.agency_contact_links(agency),
129 )
130 page.buttons.append(viewmodels.Button.primary(text=_("enrollment.retry.button"), url=session.origin(request)))
131 return PageTemplateResponse(request, page)
132 else:
133 raise Exception("Invalid retry submission.")
134 else:
135 raise Exception("This view method only supports POST.")
136
137
138 @middleware.pageview_decorator
139 def success(request):
140 """View handler for the final success page."""
141 request.path = "/enrollment/success"
142
143 page = viewmodels.Page(
144 title=_("enrollment.success.title"),
145 icon=viewmodels.Icon("bankcardcheck", pgettext("image alt text", "core.icons.bankcardcheck")),
146 content_title=_("enrollment.success.title"),
147 paragraphs=[_("enrollment.success.p1"), _("enrollment.success.p2")],
148 )
149
150 return TemplateResponse(request, "enrollment/success.html", page.context_dict())
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/benefits/core/middleware.py b/benefits/core/middleware.py
--- a/benefits/core/middleware.py
+++ b/benefits/core/middleware.py
@@ -26,6 +26,17 @@
raise AttributeError("Session not configured with agency")
+class EligibleSessionRequired(MiddlewareMixin):
+ """Middleware raises an exception for sessions lacking confirmed eligibility."""
+
+ def process_request(self, request):
+ if session.eligible(request):
+ logger.debug("Session has confirmed eligibility")
+ return None
+ else:
+ raise AttributeError("Session has no confirmed eligibility")
+
+
class DebugSession(MiddlewareMixin):
"""Middleware to configure debug context in the request session."""
diff --git a/benefits/enrollment/views.py b/benefits/enrollment/views.py
--- a/benefits/enrollment/views.py
+++ b/benefits/enrollment/views.py
@@ -78,17 +78,6 @@
return TemplateResponse(request, "enrollment/index.html", context)
-@decorator_from_middleware(middleware.AgencySessionRequired)
-def index(request):
- """View handler for the enrollment landing page."""
- if request.method == "POST":
- response = _enroll(request)
- else:
- response = _index(request)
-
- return response
-
-
def _enroll(request):
"""Helper calls the enrollment APIs."""
logger.debug("Read tokenized card")
@@ -113,7 +102,18 @@
raise Exception("Updated customer_id does not match enrolled customer_id")
-@decorator_from_middleware(middleware.AgencySessionRequired)
+@decorator_from_middleware(middleware.EligibleSessionRequired)
+def index(request):
+ """View handler for the enrollment landing page."""
+ if request.method == "POST":
+ response = _enroll(request)
+ else:
+ response = _index(request)
+
+ return response
+
+
+@decorator_from_middleware(middleware.EligibleSessionRequired)
def retry(request):
"""View handler for a recoverable failure condition."""
if request.method == "POST":
| {"golden_diff": "diff --git a/benefits/core/middleware.py b/benefits/core/middleware.py\n--- a/benefits/core/middleware.py\n+++ b/benefits/core/middleware.py\n@@ -26,6 +26,17 @@\n raise AttributeError(\"Session not configured with agency\")\n \n \n+class EligibleSessionRequired(MiddlewareMixin):\n+ \"\"\"Middleware raises an exception for sessions lacking confirmed eligibility.\"\"\"\n+\n+ def process_request(self, request):\n+ if session.eligible(request):\n+ logger.debug(\"Session has confirmed eligibility\")\n+ return None\n+ else:\n+ raise AttributeError(\"Session has no confirmed eligibility\")\n+\n+\n class DebugSession(MiddlewareMixin):\n \"\"\"Middleware to configure debug context in the request session.\"\"\"\n \ndiff --git a/benefits/enrollment/views.py b/benefits/enrollment/views.py\n--- a/benefits/enrollment/views.py\n+++ b/benefits/enrollment/views.py\n@@ -78,17 +78,6 @@\n return TemplateResponse(request, \"enrollment/index.html\", context)\n \n \n-@decorator_from_middleware(middleware.AgencySessionRequired)\n-def index(request):\n- \"\"\"View handler for the enrollment landing page.\"\"\"\n- if request.method == \"POST\":\n- response = _enroll(request)\n- else:\n- response = _index(request)\n-\n- return response\n-\n-\n def _enroll(request):\n \"\"\"Helper calls the enrollment APIs.\"\"\"\n logger.debug(\"Read tokenized card\")\n@@ -113,7 +102,18 @@\n raise Exception(\"Updated customer_id does not match enrolled customer_id\")\n \n \n-@decorator_from_middleware(middleware.AgencySessionRequired)\n+@decorator_from_middleware(middleware.EligibleSessionRequired)\n+def index(request):\n+ \"\"\"View handler for the enrollment landing page.\"\"\"\n+ if request.method == \"POST\":\n+ response = _enroll(request)\n+ else:\n+ response = _index(request)\n+\n+ return response\n+\n+\n+@decorator_from_middleware(middleware.EligibleSessionRequired)\n def retry(request):\n \"\"\"View handler for a recoverable failure condition.\"\"\"\n if request.method == \"POST\":\n", "issue": "Enrollment page is accessible without passing eligibility verification\nIf a user knows the `/enrollment` endpoint, and has configured their session with a Transit Agency, they can navigate to the credit card enrollment page and completely bypass the EV check. This is a serious bug! \r\n\r\n## Steps to reproduce\r\n\r\n1. Visit https://test-benefits.calitp.org\r\n2. Click either of the Agency buttons\r\n3. Manually update the URL to https://test-benefits.calitp.org/enrollment\r\n4. Profit\r\n\r\n## Remediation\r\n\r\nThis page must ensure the user has previously verified eligibility criteria\n", "before_files": [{"content": "\"\"\"\nThe core application: middleware definitions for request/response cycle.\n\"\"\"\nimport logging\n\nfrom django.http import HttpResponse\nfrom django.utils.decorators import decorator_from_middleware\nfrom django.utils.deprecation import MiddlewareMixin\nfrom django.views import i18n\n\nfrom benefits.settings import DEBUG\nfrom . 
import analytics, session\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass AgencySessionRequired(MiddlewareMixin):\n \"\"\"Middleware raises an exception for sessions lacking an agency configuration.\"\"\"\n\n def process_request(self, request):\n if session.active_agency(request):\n logger.debug(\"Session configured with agency\")\n return None\n else:\n raise AttributeError(\"Session not configured with agency\")\n\n\nclass DebugSession(MiddlewareMixin):\n \"\"\"Middleware to configure debug context in the request session.\"\"\"\n\n def process_request(self, request):\n session.update(request, debug=DEBUG)\n return None\n\n\nclass Healthcheck:\n \"\"\"Middleware intercepts and accepts /healthcheck requests.\"\"\"\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n if request.path == \"/healthcheck\":\n return HttpResponse(\"Healthy\", content_type=\"text/plain\")\n return self.get_response(request)\n\n\nclass ViewedPageEvent(MiddlewareMixin):\n \"\"\"Middleware sends an analytics event for page views.\"\"\"\n\n def process_response(self, request, response):\n event = analytics.ViewedPageEvent(request)\n try:\n analytics.send_event(event)\n except Exception:\n logger.warning(f\"Failed to send event: {event}\")\n finally:\n return response\n\n\npageview_decorator = decorator_from_middleware(ViewedPageEvent)\n\n\nclass ChangedLanguageEvent(MiddlewareMixin):\n \"\"\"Middleware hooks into django.views.i18n.set_language to send an analytics event.\"\"\"\n\n def process_view(self, request, view_func, view_args, view_kwargs):\n if view_func == i18n.set_language:\n new_lang = request.POST[\"language\"]\n event = analytics.ChangedLanguageEvent(request, new_lang)\n analytics.send_event(event)\n return None\n", "path": "benefits/core/middleware.py"}, {"content": "\"\"\"\nThe enrollment application: view definitions for the benefits enrollment flow.\n\"\"\"\nimport logging\n\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.decorators import decorator_from_middleware\nfrom django.utils.translation import pgettext, ugettext as _\n\nfrom benefits.core import middleware, models, session, viewmodels\nfrom benefits.core.views import PageTemplateResponse\nfrom . 
import api, forms\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef _check_access_token(request, agency):\n \"\"\"\n Ensure the request's session is configured with an access token.\n \"\"\"\n if not session.valid_token(request):\n response = api.Client(agency).access_token()\n session.update(request, token=response.access_token, token_exp=response.expiry)\n\n\ndef _index(request):\n \"\"\"Helper handles GET requests to enrollment index.\"\"\"\n agency = session.agency(request)\n\n _check_access_token(request, agency)\n\n tokenize_button = \"tokenize_card\"\n tokenize_retry_form = forms.CardTokenizeFailForm(\"enrollment:retry\")\n tokenize_success_form = forms.CardTokenizeSuccessForm(auto_id=True, label_suffix=\"\")\n\n page = viewmodels.Page(\n title=_(\"enrollment.index.title\"),\n content_title=_(\"enrollment.index.content_title\"),\n icon=viewmodels.Icon(\"idcardcheck\", pgettext(\"image alt text\", \"core.icons.idcardcheck\")),\n paragraphs=[_(\"enrollment.index.p1\"), _(\"enrollment.index.p2\")],\n classes=\"text-lg-center\",\n forms=[tokenize_retry_form, tokenize_success_form],\n buttons=[\n viewmodels.Button.primary(\n text=_(\"enrollment.buttons.paymentpartner\"), id=tokenize_button, url=f\"#{tokenize_button}\"\n ),\n viewmodels.Button.link(\n classes=\"btn-sm\", text=_(\"enrollment.buttons.paymentoptions\"), url=reverse(\"core:payment_options\")\n ),\n ],\n )\n context = {}\n context.update(page.context_dict())\n\n # add agency details\n agency_vm = viewmodels.TransitAgency(agency)\n context.update(agency_vm.context_dict())\n\n # and payment processor details\n processor_vm = viewmodels.PaymentProcessor(\n model=agency.payment_processor,\n access_token=session.token(request),\n element_id=f\"#{tokenize_button}\",\n color=\"#046b99\",\n name=f\"{agency.long_name} {_('partnered with')} {agency.payment_processor.name}\",\n )\n context.update(processor_vm.context_dict())\n logger.warn(f\"card_tokenize_url: {context['payment_processor'].card_tokenize_url}\")\n\n # the tokenize form URLs are injected to page-generated Javascript\n context[\"forms\"] = {\n \"tokenize_retry\": reverse(tokenize_retry_form.action_url),\n \"tokenize_success\": reverse(tokenize_success_form.action_url),\n }\n\n return TemplateResponse(request, \"enrollment/index.html\", context)\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\ndef index(request):\n \"\"\"View handler for the enrollment landing page.\"\"\"\n if request.method == \"POST\":\n response = _enroll(request)\n else:\n response = _index(request)\n\n return response\n\n\ndef _enroll(request):\n \"\"\"Helper calls the enrollment APIs.\"\"\"\n logger.debug(\"Read tokenized card\")\n form = forms.CardTokenizeSuccessForm(request.POST)\n if not form.is_valid():\n raise Exception(\"Invalid card token form\")\n card_token = form.cleaned_data.get(\"card_token\")\n\n eligibility = session.eligibility(request)\n if eligibility:\n logger.debug(f\"Session contains an {models.EligibilityType.__name__}\")\n else:\n raise Exception(\"Session contains no eligibility information\")\n\n agency = session.agency(request)\n\n response = api.Client(agency).enroll(card_token, eligibility.group_id)\n\n if response.success:\n return success(request)\n else:\n raise Exception(\"Updated customer_id does not match enrolled customer_id\")\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\ndef retry(request):\n \"\"\"View handler for a recoverable failure condition.\"\"\"\n if request.method == \"POST\":\n form = 
forms.CardTokenizeFailForm(request.POST)\n if form.is_valid():\n agency = session.agency(request)\n page = viewmodels.Page(\n title=_(\"enrollment.retry.title\"),\n icon=viewmodels.Icon(\"bankcardquestion\", pgettext(\"image alt text\", \"core.icons.bankcardquestion\")),\n content_title=_(\"enrollment.retry.title\"),\n paragraphs=[_(\"enrollment.retry.p1\")],\n buttons=viewmodels.Button.agency_contact_links(agency),\n )\n page.buttons.append(viewmodels.Button.primary(text=_(\"enrollment.retry.button\"), url=session.origin(request)))\n return PageTemplateResponse(request, page)\n else:\n raise Exception(\"Invalid retry submission.\")\n else:\n raise Exception(\"This view method only supports POST.\")\n\n\[email protected]_decorator\ndef success(request):\n \"\"\"View handler for the final success page.\"\"\"\n request.path = \"/enrollment/success\"\n\n page = viewmodels.Page(\n title=_(\"enrollment.success.title\"),\n icon=viewmodels.Icon(\"bankcardcheck\", pgettext(\"image alt text\", \"core.icons.bankcardcheck\")),\n content_title=_(\"enrollment.success.title\"),\n paragraphs=[_(\"enrollment.success.p1\"), _(\"enrollment.success.p2\")],\n )\n\n return TemplateResponse(request, \"enrollment/success.html\", page.context_dict())\n", "path": "benefits/enrollment/views.py"}], "after_files": [{"content": "\"\"\"\nThe core application: middleware definitions for request/response cycle.\n\"\"\"\nimport logging\n\nfrom django.http import HttpResponse\nfrom django.utils.decorators import decorator_from_middleware\nfrom django.utils.deprecation import MiddlewareMixin\nfrom django.views import i18n\n\nfrom benefits.settings import DEBUG\nfrom . import analytics, session\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass AgencySessionRequired(MiddlewareMixin):\n \"\"\"Middleware raises an exception for sessions lacking an agency configuration.\"\"\"\n\n def process_request(self, request):\n if session.active_agency(request):\n logger.debug(\"Session configured with agency\")\n return None\n else:\n raise AttributeError(\"Session not configured with agency\")\n\n\nclass EligibleSessionRequired(MiddlewareMixin):\n \"\"\"Middleware raises an exception for sessions lacking confirmed eligibility.\"\"\"\n\n def process_request(self, request):\n if session.eligible(request):\n logger.debug(\"Session has confirmed eligibility\")\n return None\n else:\n raise AttributeError(\"Session has no confirmed eligibility\")\n\n\nclass DebugSession(MiddlewareMixin):\n \"\"\"Middleware to configure debug context in the request session.\"\"\"\n\n def process_request(self, request):\n session.update(request, debug=DEBUG)\n return None\n\n\nclass Healthcheck:\n \"\"\"Middleware intercepts and accepts /healthcheck requests.\"\"\"\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n if request.path == \"/healthcheck\":\n return HttpResponse(\"Healthy\", content_type=\"text/plain\")\n return self.get_response(request)\n\n\nclass ViewedPageEvent(MiddlewareMixin):\n \"\"\"Middleware sends an analytics event for page views.\"\"\"\n\n def process_response(self, request, response):\n event = analytics.ViewedPageEvent(request)\n try:\n analytics.send_event(event)\n except Exception:\n logger.warning(f\"Failed to send event: {event}\")\n finally:\n return response\n\n\npageview_decorator = decorator_from_middleware(ViewedPageEvent)\n\n\nclass ChangedLanguageEvent(MiddlewareMixin):\n \"\"\"Middleware hooks into django.views.i18n.set_language to send an analytics event.\"\"\"\n\n 
def process_view(self, request, view_func, view_args, view_kwargs):\n if view_func == i18n.set_language:\n new_lang = request.POST[\"language\"]\n event = analytics.ChangedLanguageEvent(request, new_lang)\n analytics.send_event(event)\n return None\n", "path": "benefits/core/middleware.py"}, {"content": "\"\"\"\nThe enrollment application: view definitions for the benefits enrollment flow.\n\"\"\"\nimport logging\n\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.decorators import decorator_from_middleware\nfrom django.utils.translation import pgettext, ugettext as _\n\nfrom benefits.core import middleware, models, session, viewmodels\nfrom benefits.core.views import PageTemplateResponse\nfrom . import api, forms\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef _check_access_token(request, agency):\n \"\"\"\n Ensure the request's session is configured with an access token.\n \"\"\"\n if not session.valid_token(request):\n response = api.Client(agency).access_token()\n session.update(request, token=response.access_token, token_exp=response.expiry)\n\n\ndef _index(request):\n \"\"\"Helper handles GET requests to enrollment index.\"\"\"\n agency = session.agency(request)\n\n _check_access_token(request, agency)\n\n tokenize_button = \"tokenize_card\"\n tokenize_retry_form = forms.CardTokenizeFailForm(\"enrollment:retry\")\n tokenize_success_form = forms.CardTokenizeSuccessForm(auto_id=True, label_suffix=\"\")\n\n page = viewmodels.Page(\n title=_(\"enrollment.index.title\"),\n content_title=_(\"enrollment.index.content_title\"),\n icon=viewmodels.Icon(\"idcardcheck\", pgettext(\"image alt text\", \"core.icons.idcardcheck\")),\n paragraphs=[_(\"enrollment.index.p1\"), _(\"enrollment.index.p2\")],\n classes=\"text-lg-center\",\n forms=[tokenize_retry_form, tokenize_success_form],\n buttons=[\n viewmodels.Button.primary(\n text=_(\"enrollment.buttons.paymentpartner\"), id=tokenize_button, url=f\"#{tokenize_button}\"\n ),\n viewmodels.Button.link(\n classes=\"btn-sm\", text=_(\"enrollment.buttons.paymentoptions\"), url=reverse(\"core:payment_options\")\n ),\n ],\n )\n context = {}\n context.update(page.context_dict())\n\n # add agency details\n agency_vm = viewmodels.TransitAgency(agency)\n context.update(agency_vm.context_dict())\n\n # and payment processor details\n processor_vm = viewmodels.PaymentProcessor(\n model=agency.payment_processor,\n access_token=session.token(request),\n element_id=f\"#{tokenize_button}\",\n color=\"#046b99\",\n name=f\"{agency.long_name} {_('partnered with')} {agency.payment_processor.name}\",\n )\n context.update(processor_vm.context_dict())\n logger.warn(f\"card_tokenize_url: {context['payment_processor'].card_tokenize_url}\")\n\n # the tokenize form URLs are injected to page-generated Javascript\n context[\"forms\"] = {\n \"tokenize_retry\": reverse(tokenize_retry_form.action_url),\n \"tokenize_success\": reverse(tokenize_success_form.action_url),\n }\n\n return TemplateResponse(request, \"enrollment/index.html\", context)\n\n\ndef _enroll(request):\n \"\"\"Helper calls the enrollment APIs.\"\"\"\n logger.debug(\"Read tokenized card\")\n form = forms.CardTokenizeSuccessForm(request.POST)\n if not form.is_valid():\n raise Exception(\"Invalid card token form\")\n card_token = form.cleaned_data.get(\"card_token\")\n\n eligibility = session.eligibility(request)\n if eligibility:\n logger.debug(f\"Session contains an {models.EligibilityType.__name__}\")\n else:\n raise Exception(\"Session contains no 
eligibility information\")\n\n agency = session.agency(request)\n\n response = api.Client(agency).enroll(card_token, eligibility.group_id)\n\n if response.success:\n return success(request)\n else:\n raise Exception(\"Updated customer_id does not match enrolled customer_id\")\n\n\n@decorator_from_middleware(middleware.EligibleSessionRequired)\ndef index(request):\n \"\"\"View handler for the enrollment landing page.\"\"\"\n if request.method == \"POST\":\n response = _enroll(request)\n else:\n response = _index(request)\n\n return response\n\n\n@decorator_from_middleware(middleware.EligibleSessionRequired)\ndef retry(request):\n \"\"\"View handler for a recoverable failure condition.\"\"\"\n if request.method == \"POST\":\n form = forms.CardTokenizeFailForm(request.POST)\n if form.is_valid():\n agency = session.agency(request)\n page = viewmodels.Page(\n title=_(\"enrollment.retry.title\"),\n icon=viewmodels.Icon(\"bankcardquestion\", pgettext(\"image alt text\", \"core.icons.bankcardquestion\")),\n content_title=_(\"enrollment.retry.title\"),\n paragraphs=[_(\"enrollment.retry.p1\")],\n buttons=viewmodels.Button.agency_contact_links(agency),\n )\n page.buttons.append(viewmodels.Button.primary(text=_(\"enrollment.retry.button\"), url=session.origin(request)))\n return PageTemplateResponse(request, page)\n else:\n raise Exception(\"Invalid retry submission.\")\n else:\n raise Exception(\"This view method only supports POST.\")\n\n\[email protected]_decorator\ndef success(request):\n \"\"\"View handler for the final success page.\"\"\"\n request.path = \"/enrollment/success\"\n\n page = viewmodels.Page(\n title=_(\"enrollment.success.title\"),\n icon=viewmodels.Icon(\"bankcardcheck\", pgettext(\"image alt text\", \"core.icons.bankcardcheck\")),\n content_title=_(\"enrollment.success.title\"),\n paragraphs=[_(\"enrollment.success.p1\"), _(\"enrollment.success.p2\")],\n )\n\n return TemplateResponse(request, \"enrollment/success.html\", page.context_dict())\n", "path": "benefits/enrollment/views.py"}]} | 2,457 | 467 |
gh_patches_debug_20127 | rasdani/github-patches | git_diff | rotki__rotki-591 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sign In Failed - TypeError
Good evening!
I'm here on Linux; I've just tried to log in to my Rotki database created with 1.0.4 (using 1.0.5 now). After I type in my password and log in, I get the message
> **Sign In Failed**
> TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType'
Now when I attempt to go back to 1.0.4 I get
> **Sign In Failed**
> DBUpgradeError: Your database version is newer than the version expected by the executable. Did you perhaps try to revert to an older rotkehlchen version? Please only use the latest version of the software.
No big worries; I'm still evaluating the software to see if it can do what I need, so there's not a ton of data in there. Just thought you should know. I'll be happy to help with debugging if I can. But kids, so... I'll do my best!
--- END ISSUE ---
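The traceback points at an `int()` call receiving a value that comes back from the database as `None`. As a hedged illustration of the defensive pattern involved (which setting is affected, and what `None` should mean for it, is for the code below to decide), a nullable integer setting can be deserialized like this:
```python
# Minimal sketch: treat a NULL database value as "setting disabled" instead
# of letting int(None) raise the TypeError from the report.
from typing import Optional, Union


def read_optional_int(value: Optional[Union[str, int]]) -> Optional[int]:
    """Return None for a missing/disabled value, otherwise coerce to int."""
    if value is None:
        return None
    return int(value)
```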
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rotkehlchen/db/settings.py`
Content:
```
1 from typing import Any, Dict, NamedTuple, Union
2
3 from rotkehlchen.constants.assets import S_USD
4 from rotkehlchen.constants.timing import YEAR_IN_SECONDS
5 from rotkehlchen.db.utils import str_to_bool
6 from rotkehlchen.errors import DeserializationError
7 from rotkehlchen.typing import FiatAsset, Timestamp
8 from rotkehlchen.user_messages import MessagesAggregator
9
10 ROTKEHLCHEN_DB_VERSION = 8
11 DEFAULT_TAXFREE_AFTER_PERIOD = YEAR_IN_SECONDS
12 DEFAULT_INCLUDE_CRYPTO2CRYPTO = True
13 DEFAULT_INCLUDE_GAS_COSTS = True
14 DEFAULT_ANONYMIZED_LOGS = False
15 DEFAULT_PREMIUM_SHOULD_SYNC = False
16 DEFAULT_START_DATE = '01/08/2015'
17 DEFAULT_UI_FLOATING_PRECISION = 2
18 DEFAULT_BALANCE_SAVE_FREQUENCY = 24
19 DEFAULT_MAIN_CURRENCY = S_USD
20 DEFAULT_DATE_DISPLAY_FORMAT = '%d/%m/%Y %H:%M:%S %Z'
21 DEFAULT_SUBMIT_USAGE_ANALYTICS = True
22
23
24 class DBSettings(NamedTuple):
25 version: int = ROTKEHLCHEN_DB_VERSION
26 last_write_ts: Timestamp = Timestamp(0)
27 premium_should_sync: bool = DEFAULT_PREMIUM_SHOULD_SYNC
28 include_crypto2crypto: bool = DEFAULT_INCLUDE_CRYPTO2CRYPTO
29 anonymized_logs: bool = DEFAULT_ANONYMIZED_LOGS
30 last_data_upload_ts: Timestamp = Timestamp(0)
31 ui_floating_precision: int = DEFAULT_UI_FLOATING_PRECISION
32 taxfree_after_period: int = DEFAULT_TAXFREE_AFTER_PERIOD
33 balance_save_frequency: int = DEFAULT_BALANCE_SAVE_FREQUENCY
34 include_gas_costs: bool = DEFAULT_INCLUDE_GAS_COSTS
35 historical_data_start: str = DEFAULT_START_DATE
36 eth_rpc_endpoint: str = 'http://localhost:8545'
37 main_currency: FiatAsset = DEFAULT_MAIN_CURRENCY
38 date_display_format: str = DEFAULT_DATE_DISPLAY_FORMAT
39 last_balance_save: Timestamp = Timestamp(0)
40 submit_usage_analytics: bool = DEFAULT_SUBMIT_USAGE_ANALYTICS
41
42
43 def read_boolean(value: Union[str, bool]) -> bool:
44 if isinstance(value, bool):
45 return value
46 elif isinstance(value, str):
47 return str_to_bool(value)
48
49 raise DeserializationError(
50 f'Failed to read a boolean from {value} which is of type {type(value)}',
51 )
52
53
54 def db_settings_from_dict(
55 settings_dict: Dict[str, Any],
56 msg_aggregator: MessagesAggregator,
57 ) -> DBSettings:
58 specified_args: Dict[str, Any] = {}
59 for key, value in settings_dict.items():
60 if key == 'version':
61 specified_args[key] = int(value)
62 elif key == 'historical_data_start':
63 specified_args[key] = str(value)
64 elif key == 'eth_rpc_endpoint':
65 specified_args[key] = str(value)
66 elif key == 'ui_floating_precision':
67 specified_args[key] = int(value)
68 elif key == 'include_crypto2crypto':
69 specified_args[key] = read_boolean(value)
70 elif key == 'taxfree_after_period':
71 specified_args[key] = int(value)
72 elif key == 'balance_save_frequency':
73 specified_args[key] = int(value)
74 elif key == 'main_currency':
75 specified_args[key] = FiatAsset(str(value))
76 elif key == 'anonymized_logs':
77 specified_args[key] = read_boolean(value)
78 elif key == 'include_gas_costs':
79 specified_args[key] = read_boolean(value)
80 elif key == 'date_display_format':
81 specified_args[key] = str(value)
82 elif key == 'premium_should_sync':
83 specified_args[key] = read_boolean(value)
84 elif key == 'last_write_ts':
85 specified_args[key] = Timestamp(int(value))
86 elif key == 'last_data_upload_ts':
87 specified_args[key] = Timestamp(int(value))
88 elif key == 'last_balance_save':
89 specified_args[key] = Timestamp(int(value))
90 elif key == 'submit_usage_analytics':
91 specified_args[key] = read_boolean(value)
92 else:
93 msg_aggregator.add_warning(
94 f'Unknown DB setting {key} given. Ignoring it. Should not '
95 f'happen so please open an issue in Github.',
96 )
97
98 return DBSettings(**specified_args)
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rotkehlchen/db/settings.py b/rotkehlchen/db/settings.py
--- a/rotkehlchen/db/settings.py
+++ b/rotkehlchen/db/settings.py
@@ -68,7 +68,23 @@
elif key == 'include_crypto2crypto':
specified_args[key] = read_boolean(value)
elif key == 'taxfree_after_period':
- specified_args[key] = int(value)
+ # taxfree_after_period can also be None, to signify disabled setting
+ if value is None:
+ specified_args[key] = value
+ else:
+ int_value = int(value)
+ if int_value <= 0:
+ value = None
+ msg_aggregator.add_warning(
+ f'A negative or zero value ({int_value}) for taxfree_after_period '
+ f'ended up in the DB. Setting it to None. Please open an issue in '
+ f'Github: https://github.com/rotki/rotki/issues/new/choose',
+ )
+
+ else:
+ value = int_value
+
+ specified_args[key] = value
elif key == 'balance_save_frequency':
specified_args[key] = int(value)
elif key == 'main_currency':
| {"golden_diff": "diff --git a/rotkehlchen/db/settings.py b/rotkehlchen/db/settings.py\n--- a/rotkehlchen/db/settings.py\n+++ b/rotkehlchen/db/settings.py\n@@ -68,7 +68,23 @@\n elif key == 'include_crypto2crypto':\n specified_args[key] = read_boolean(value)\n elif key == 'taxfree_after_period':\n- specified_args[key] = int(value)\n+ # taxfree_after_period can also be None, to signify disabled setting\n+ if value is None:\n+ specified_args[key] = value\n+ else:\n+ int_value = int(value)\n+ if int_value <= 0:\n+ value = None\n+ msg_aggregator.add_warning(\n+ f'A negative or zero value ({int_value}) for taxfree_after_period '\n+ f'ended up in the DB. Setting it to None. Please open an issue in '\n+ f'Github: https://github.com/rotki/rotki/issues/new/choose',\n+ )\n+\n+ else:\n+ value = int_value\n+\n+ specified_args[key] = value\n elif key == 'balance_save_frequency':\n specified_args[key] = int(value)\n elif key == 'main_currency':\n", "issue": "Sign In Failed - TypeError\nGood evening!\r\n\r\nI'm here on Linux, I've just tried to log in to my Rotki database created with 1.0.4 (using 1.0.5 now). After I type in my password and log in, I get the message\r\n\r\n> **Sign In Failed**\r\n> TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType'\r\n\r\nNow when I attempt to go back to 1.0.4 I get\r\n> **Sign In Failed**\r\n> DBUpgradeError: Your database version is newer than the version expected by the executable. Did you perhaps try to revert to an older rotkehlchen version?Please only use the latest version of the software.\r\n\r\nNo big worries, I'm still evaluating the software to see if it can do what I need so there's not a ton of data in there. Just thought you should know. I'll be happy to help with debugging if I can. But kids, so... 
I'll do my best!\n", "before_files": [{"content": "from typing import Any, Dict, NamedTuple, Union\n\nfrom rotkehlchen.constants.assets import S_USD\nfrom rotkehlchen.constants.timing import YEAR_IN_SECONDS\nfrom rotkehlchen.db.utils import str_to_bool\nfrom rotkehlchen.errors import DeserializationError\nfrom rotkehlchen.typing import FiatAsset, Timestamp\nfrom rotkehlchen.user_messages import MessagesAggregator\n\nROTKEHLCHEN_DB_VERSION = 8\nDEFAULT_TAXFREE_AFTER_PERIOD = YEAR_IN_SECONDS\nDEFAULT_INCLUDE_CRYPTO2CRYPTO = True\nDEFAULT_INCLUDE_GAS_COSTS = True\nDEFAULT_ANONYMIZED_LOGS = False\nDEFAULT_PREMIUM_SHOULD_SYNC = False\nDEFAULT_START_DATE = '01/08/2015'\nDEFAULT_UI_FLOATING_PRECISION = 2\nDEFAULT_BALANCE_SAVE_FREQUENCY = 24\nDEFAULT_MAIN_CURRENCY = S_USD\nDEFAULT_DATE_DISPLAY_FORMAT = '%d/%m/%Y %H:%M:%S %Z'\nDEFAULT_SUBMIT_USAGE_ANALYTICS = True\n\n\nclass DBSettings(NamedTuple):\n version: int = ROTKEHLCHEN_DB_VERSION\n last_write_ts: Timestamp = Timestamp(0)\n premium_should_sync: bool = DEFAULT_PREMIUM_SHOULD_SYNC\n include_crypto2crypto: bool = DEFAULT_INCLUDE_CRYPTO2CRYPTO\n anonymized_logs: bool = DEFAULT_ANONYMIZED_LOGS\n last_data_upload_ts: Timestamp = Timestamp(0)\n ui_floating_precision: int = DEFAULT_UI_FLOATING_PRECISION\n taxfree_after_period: int = DEFAULT_TAXFREE_AFTER_PERIOD\n balance_save_frequency: int = DEFAULT_BALANCE_SAVE_FREQUENCY\n include_gas_costs: bool = DEFAULT_INCLUDE_GAS_COSTS\n historical_data_start: str = DEFAULT_START_DATE\n eth_rpc_endpoint: str = 'http://localhost:8545'\n main_currency: FiatAsset = DEFAULT_MAIN_CURRENCY\n date_display_format: str = DEFAULT_DATE_DISPLAY_FORMAT\n last_balance_save: Timestamp = Timestamp(0)\n submit_usage_analytics: bool = DEFAULT_SUBMIT_USAGE_ANALYTICS\n\n\ndef read_boolean(value: Union[str, bool]) -> bool:\n if isinstance(value, bool):\n return value\n elif isinstance(value, str):\n return str_to_bool(value)\n\n raise DeserializationError(\n f'Failed to read a boolean from {value} which is of type {type(value)}',\n )\n\n\ndef db_settings_from_dict(\n settings_dict: Dict[str, Any],\n msg_aggregator: MessagesAggregator,\n) -> DBSettings:\n specified_args: Dict[str, Any] = {}\n for key, value in settings_dict.items():\n if key == 'version':\n specified_args[key] = int(value)\n elif key == 'historical_data_start':\n specified_args[key] = str(value)\n elif key == 'eth_rpc_endpoint':\n specified_args[key] = str(value)\n elif key == 'ui_floating_precision':\n specified_args[key] = int(value)\n elif key == 'include_crypto2crypto':\n specified_args[key] = read_boolean(value)\n elif key == 'taxfree_after_period':\n specified_args[key] = int(value)\n elif key == 'balance_save_frequency':\n specified_args[key] = int(value)\n elif key == 'main_currency':\n specified_args[key] = FiatAsset(str(value))\n elif key == 'anonymized_logs':\n specified_args[key] = read_boolean(value)\n elif key == 'include_gas_costs':\n specified_args[key] = read_boolean(value)\n elif key == 'date_display_format':\n specified_args[key] = str(value)\n elif key == 'premium_should_sync':\n specified_args[key] = read_boolean(value)\n elif key == 'last_write_ts':\n specified_args[key] = Timestamp(int(value))\n elif key == 'last_data_upload_ts':\n specified_args[key] = Timestamp(int(value))\n elif key == 'last_balance_save':\n specified_args[key] = Timestamp(int(value))\n elif key == 'submit_usage_analytics':\n specified_args[key] = read_boolean(value)\n else:\n msg_aggregator.add_warning(\n f'Unknown DB setting {key} given. Ignoring it. 
Should not '\n f'happen so please open an issue in Github.',\n )\n\n return DBSettings(**specified_args)\n", "path": "rotkehlchen/db/settings.py"}], "after_files": [{"content": "from typing import Any, Dict, NamedTuple, Union\n\nfrom rotkehlchen.constants.assets import S_USD\nfrom rotkehlchen.constants.timing import YEAR_IN_SECONDS\nfrom rotkehlchen.db.utils import str_to_bool\nfrom rotkehlchen.errors import DeserializationError\nfrom rotkehlchen.typing import FiatAsset, Timestamp\nfrom rotkehlchen.user_messages import MessagesAggregator\n\nROTKEHLCHEN_DB_VERSION = 8\nDEFAULT_TAXFREE_AFTER_PERIOD = YEAR_IN_SECONDS\nDEFAULT_INCLUDE_CRYPTO2CRYPTO = True\nDEFAULT_INCLUDE_GAS_COSTS = True\nDEFAULT_ANONYMIZED_LOGS = False\nDEFAULT_PREMIUM_SHOULD_SYNC = False\nDEFAULT_START_DATE = '01/08/2015'\nDEFAULT_UI_FLOATING_PRECISION = 2\nDEFAULT_BALANCE_SAVE_FREQUENCY = 24\nDEFAULT_MAIN_CURRENCY = S_USD\nDEFAULT_DATE_DISPLAY_FORMAT = '%d/%m/%Y %H:%M:%S %Z'\nDEFAULT_SUBMIT_USAGE_ANALYTICS = True\n\n\nclass DBSettings(NamedTuple):\n version: int = ROTKEHLCHEN_DB_VERSION\n last_write_ts: Timestamp = Timestamp(0)\n premium_should_sync: bool = DEFAULT_PREMIUM_SHOULD_SYNC\n include_crypto2crypto: bool = DEFAULT_INCLUDE_CRYPTO2CRYPTO\n anonymized_logs: bool = DEFAULT_ANONYMIZED_LOGS\n last_data_upload_ts: Timestamp = Timestamp(0)\n ui_floating_precision: int = DEFAULT_UI_FLOATING_PRECISION\n taxfree_after_period: int = DEFAULT_TAXFREE_AFTER_PERIOD\n balance_save_frequency: int = DEFAULT_BALANCE_SAVE_FREQUENCY\n include_gas_costs: bool = DEFAULT_INCLUDE_GAS_COSTS\n historical_data_start: str = DEFAULT_START_DATE\n eth_rpc_endpoint: str = 'http://localhost:8545'\n main_currency: FiatAsset = DEFAULT_MAIN_CURRENCY\n date_display_format: str = DEFAULT_DATE_DISPLAY_FORMAT\n last_balance_save: Timestamp = Timestamp(0)\n submit_usage_analytics: bool = DEFAULT_SUBMIT_USAGE_ANALYTICS\n\n\ndef read_boolean(value: Union[str, bool]) -> bool:\n if isinstance(value, bool):\n return value\n elif isinstance(value, str):\n return str_to_bool(value)\n\n raise DeserializationError(\n f'Failed to read a boolean from {value} which is of type {type(value)}',\n )\n\n\ndef db_settings_from_dict(\n settings_dict: Dict[str, Any],\n msg_aggregator: MessagesAggregator,\n) -> DBSettings:\n specified_args: Dict[str, Any] = {}\n for key, value in settings_dict.items():\n if key == 'version':\n specified_args[key] = int(value)\n elif key == 'historical_data_start':\n specified_args[key] = str(value)\n elif key == 'eth_rpc_endpoint':\n specified_args[key] = str(value)\n elif key == 'ui_floating_precision':\n specified_args[key] = int(value)\n elif key == 'include_crypto2crypto':\n specified_args[key] = read_boolean(value)\n elif key == 'taxfree_after_period':\n # taxfree_after_period can also be None, to signify disabled setting\n if value is None:\n specified_args[key] = value\n else:\n int_value = int(value)\n if int_value <= 0:\n value = None\n msg_aggregator.add_warning(\n f'A negative or zero value ({int_value}) for taxfree_after_period '\n f'ended up in the DB. Setting it to None. 
Please open an issue in '\n f'Github: https://github.com/rotki/rotki/issues/new/choose',\n )\n\n else:\n value = int_value\n\n specified_args[key] = value\n elif key == 'balance_save_frequency':\n specified_args[key] = int(value)\n elif key == 'main_currency':\n specified_args[key] = FiatAsset(str(value))\n elif key == 'anonymized_logs':\n specified_args[key] = read_boolean(value)\n elif key == 'include_gas_costs':\n specified_args[key] = read_boolean(value)\n elif key == 'date_display_format':\n specified_args[key] = str(value)\n elif key == 'premium_should_sync':\n specified_args[key] = read_boolean(value)\n elif key == 'last_write_ts':\n specified_args[key] = Timestamp(int(value))\n elif key == 'last_data_upload_ts':\n specified_args[key] = Timestamp(int(value))\n elif key == 'last_balance_save':\n specified_args[key] = Timestamp(int(value))\n elif key == 'submit_usage_analytics':\n specified_args[key] = read_boolean(value)\n else:\n msg_aggregator.add_warning(\n f'Unknown DB setting {key} given. Ignoring it. Should not '\n f'happen so please open an issue in Github.',\n )\n\n return DBSettings(**specified_args)\n", "path": "rotkehlchen/db/settings.py"}]} | 1,588 | 276 |
gh_patches_debug_16550 | rasdani/github-patches | git_diff | deepset-ai__haystack-2819 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PR stuck after approval and automated "Update Documentation & Code Style" commit
**Describe the bug**
See https://github.com/deepset-ai/haystack/pull/2838
This PR has approval, all tests passed and is ready to merge. However, due to the "Update Documentation & Code Style" commit, the required tests have not been executed and it's stuck. Manually executing the Test workflow on this branch does not help: see https://github.com/deepset-ai/haystack/actions/runs/2692003395.
All I can do is make a minimal change to trigger the Test workflow correctly. That's cumbersome.
**Expected behavior**
Being able to merge after the "Update Documentation & Code Style" commit when all tests of the previous commit are green, or maybe easier: being able to manually trigger the workflow to run all required tests for merging.
**To Reproduce**
- Create PR with docs change
- Wait till workflows pass correctly and someone approves
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `.github/utils/generate_openapi_specs.py`
Content:
```
1 import json
2 from pathlib import Path
3 import os
4 import sys
5 import shutil
6
7 sys.path.append(".")
8 from rest_api.utils import get_openapi_specs, get_app, get_pipelines # pylint: disable=wrong-import-position
9 from haystack import __version__ # pylint: disable=wrong-import-position
10
11 REST_PATH = Path("./rest_api").absolute()
12 PIPELINE_PATH = str(REST_PATH / "pipeline" / "pipeline_empty.haystack-pipeline.yml")
13 APP_PATH = str(REST_PATH / "application.py")
14 DOCS_PATH = Path("./docs") / "_src" / "api" / "openapi"
15
16 os.environ["PIPELINE_YAML_PATH"] = PIPELINE_PATH
17
18 print(f"Loading OpenAPI specs from {APP_PATH} with pipeline at {PIPELINE_PATH}")
19
20 # To initialize the app and the pipelines
21 get_app()
22 get_pipelines()
23
24 # Generate the openapi specs
25 specs = get_openapi_specs()
26
27 # Dump the specs into a JSON file
28 with open(DOCS_PATH / "openapi.json", "w") as f:
29 json.dump(specs, f, indent=4)
30
31 # Remove rc versions of the specs from the folder
32 for specs_file in os.listdir():
33 if os.path.isfile(specs_file) and "rc" in specs_file and Path(specs_file).suffix == ".json":
34 os.remove(specs_file)
35
36 # Add versioned copy
37 shutil.copy(DOCS_PATH / "openapi.json", DOCS_PATH / f"openapi-{__version__}.json")
38
```
Path: `.github/utils/generate_json_schema.py`
Content:
```
1 import sys
2 import logging
3 from pathlib import Path
4
5 logging.basicConfig(level=logging.INFO)
6
7
8 sys.path.append(".")
9 from haystack.nodes._json_schema import update_json_schema
10
11 update_json_schema(destination_path=Path(__file__).parent.parent.parent / "haystack" / "json-schemas")
12
```
Path: `.github/utils/convert_notebooks_into_webpages.py`
Content:
```
1 import re
2
3 from nbconvert import MarkdownExporter
4 import os
5 from pathlib import Path
6
7 headers = {
8 1: """<!---
9 title: "Tutorial 1"
10 metaTitle: "Build Your First QA System"
11 metaDescription: ""
12 slug: "/docs/tutorial1"
13 date: "2020-09-03"
14 id: "tutorial1md"
15 --->""",
16 2: """<!---
17 title: "Tutorial 2"
18 metaTitle: "Fine-tuning a model on your own data"
19 metaDescription: ""
20 slug: "/docs/tutorial2"
21 date: "2020-09-03"
22 id: "tutorial2md"
23 --->""",
24 3: """<!---
25 title: "Tutorial 3"
26 metaTitle: "Build a QA System Without Elasticsearch"
27 metaDescription: ""
28 slug: "/docs/tutorial3"
29 date: "2020-09-03"
30 id: "tutorial3md"
31 --->""",
32 4: """<!---
33 title: "Tutorial 4"
34 metaTitle: "Utilizing existing FAQs for Question Answering"
35 metaDescription: ""
36 slug: "/docs/tutorial4"
37 date: "2020-09-03"
38 id: "tutorial4md"
39 --->""",
40 5: """<!---
41 title: "Tutorial 5"
42 metaTitle: "Evaluation of a QA System"
43 metaDescription: ""
44 slug: "/docs/tutorial5"
45 date: "2020-09-03"
46 id: "tutorial5md"
47 --->""",
48 6: """<!---
49 title: "Tutorial 6"
50 metaTitle: "Better retrieval via Dense Passage Retrieval"
51 metaDescription: ""
52 slug: "/docs/tutorial6"
53 date: "2020-09-03"
54 id: "tutorial6md"
55 --->""",
56 7: """<!---
57 title: "Tutorial 7"
58 metaTitle: "Generative QA with RAG"
59 metaDescription: ""
60 slug: "/docs/tutorial7"
61 date: "2020-11-12"
62 id: "tutorial7md"
63 --->""",
64 8: """<!---
65 title: "Tutorial 8"
66 metaTitle: "Preprocessing"
67 metaDescription: ""
68 slug: "/docs/tutorial8"
69 date: "2021-01-08"
70 id: "tutorial8md"
71 --->""",
72 9: """<!---
73 title: "Tutorial 9"
74 metaTitle: "Training a Dense Passage Retrieval model"
75 metaDescription: ""
76 slug: "/docs/tutorial9"
77 date: "2021-01-08"
78 id: "tutorial9md"
79 --->""",
80 10: """<!---
81 title: "Tutorial 10"
82 metaTitle: "Knowledge Graph QA"
83 metaDescription: ""
84 slug: "/docs/tutorial10"
85 date: "2021-04-06"
86 id: "tutorial10md"
87 --->""",
88 11: """<!---
89 title: "Tutorial 11"
90 metaTitle: "Pipelines"
91 metaDescription: ""
92 slug: "/docs/tutorial11"
93 date: "2021-04-06"
94 id: "tutorial11md"
95 --->""",
96 12: """<!---
97 title: "Tutorial 12"
98 metaTitle: "Generative QA with LFQA"
99 metaDescription: ""
100 slug: "/docs/tutorial12"
101 date: "2021-04-06"
102 id: "tutorial12md"
103 --->""",
104 13: """<!---
105 title: "Tutorial 13"
106 metaTitle: "Question Generation"
107 metaDescription: ""
108 slug: "/docs/tutorial13"
109 date: "2021-08-23"
110 id: "tutorial13md"
111 --->""",
112 14: """<!---
113 title: "Tutorial 14"
114 metaTitle: "Query Classifier Tutorial"
115 metaDescription: ""
116 slug: "/docs/tutorial14"
117 date: "2021-08-23"
118 id: "tutorial14md"
119 --->""",
120 15: """<!---
121 title: "Tutorial 15"
122 metaTitle: "TableQA Tutorial"
123 metaDescription: ""
124 slug: "/docs/tutorial15"
125 date: "2021-10-28"
126 id: "tutorial15md"
127 --->""",
128 16: """<!---
129 title: "Tutorial 16"
130 metaTitle: "DocumentClassifier at Index Time Tutorial"
131 metaDescription: ""
132 slug: "/docs/tutorial16"
133 date: "2021-11-05"
134 id: "tutorial16md"
135 --->""",
136 17: """<!---
137 title: "Tutorial 17"
138 metaTitle: "Audio Tutorial"
139 metaDescription: ""
140 slug: "/docs/tutorial17"
141 date: "2022-06-15"
142 id: "tutorial17md"
143 --->""",
144 18: """<!---
145 title: "Tutorial 18"
146 metaTitle: "GPL Domain Adaptation"
147 metaDescription: ""
148 slug: "/docs/tutorial18"
149 date: "2022-06-22"
150 id: "tutorial18md"
151 --->""",
152 }
153
154
155 def atoi(text):
156 return int(text) if text.isdigit() else text
157
158
159 def natural_keys(text):
160 test = [atoi(c) for c in re.split("(\d+)", text)]
161 return test
162
163
164 dir = Path(__file__).parent.parent.parent / "tutorials"
165
166 notebooks = [x for x in os.listdir(dir) if x[-6:] == ".ipynb"]
167 # sort notebooks based on numbers within name of notebook
168 notebooks = sorted(notebooks, key=lambda x: natural_keys(x))
169
170
171 e = MarkdownExporter(exclude_output=True)
172 for i, nb in enumerate(notebooks):
173 body, resources = e.from_filename(dir / nb)
174 print(f"Processing {dir}/{nb}")
175
176 tutorials_path = Path(__file__).parent.parent.parent / "docs" / "_src" / "tutorials" / "tutorials"
177 with open(tutorials_path / f"{i + 1}.md", "w", encoding="utf-8") as f:
178 try:
179 f.write(headers[i + 1] + "\n\n")
180 except IndexError as e:
181 raise IndexError(
182 "Can't find the header for this tutorial. Have you added it in '.github/utils/convert_notebooks_into_webpages.py'?"
183 )
184 f.write(body)
185
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/.github/utils/convert_notebooks_into_webpages.py b/.github/utils/convert_notebooks_into_webpages.py
old mode 100644
new mode 100755
--- a/.github/utils/convert_notebooks_into_webpages.py
+++ b/.github/utils/convert_notebooks_into_webpages.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
import re
from nbconvert import MarkdownExporter
@@ -142,7 +144,7 @@
id: "tutorial17md"
--->""",
18: """<!---
-title: "Tutorial 18"
+title: "Tutorial 18"
metaTitle: "GPL Domain Adaptation"
metaDescription: ""
slug: "/docs/tutorial18"
diff --git a/.github/utils/generate_json_schema.py b/.github/utils/generate_json_schema.py
old mode 100644
new mode 100755
--- a/.github/utils/generate_json_schema.py
+++ b/.github/utils/generate_json_schema.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
import sys
import logging
from pathlib import Path
diff --git a/.github/utils/generate_openapi_specs.py b/.github/utils/generate_openapi_specs.py
old mode 100644
new mode 100755
--- a/.github/utils/generate_openapi_specs.py
+++ b/.github/utils/generate_openapi_specs.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
import json
from pathlib import Path
import os
| {"golden_diff": "diff --git a/.github/utils/convert_notebooks_into_webpages.py b/.github/utils/convert_notebooks_into_webpages.py\nold mode 100644\nnew mode 100755\n--- a/.github/utils/convert_notebooks_into_webpages.py\n+++ b/.github/utils/convert_notebooks_into_webpages.py\n@@ -1,3 +1,5 @@\n+#!/usr/bin/env python3\n+\n import re\n \n from nbconvert import MarkdownExporter\n@@ -142,7 +144,7 @@\n id: \"tutorial17md\"\n --->\"\"\",\n 18: \"\"\"<!---\n-title: \"Tutorial 18\" \n+title: \"Tutorial 18\"\n metaTitle: \"GPL Domain Adaptation\"\n metaDescription: \"\"\n slug: \"/docs/tutorial18\"\ndiff --git a/.github/utils/generate_json_schema.py b/.github/utils/generate_json_schema.py\nold mode 100644\nnew mode 100755\n--- a/.github/utils/generate_json_schema.py\n+++ b/.github/utils/generate_json_schema.py\n@@ -1,3 +1,5 @@\n+#!/usr/bin/env python3\n+\n import sys\n import logging\n from pathlib import Path\ndiff --git a/.github/utils/generate_openapi_specs.py b/.github/utils/generate_openapi_specs.py\nold mode 100644\nnew mode 100755\n--- a/.github/utils/generate_openapi_specs.py\n+++ b/.github/utils/generate_openapi_specs.py\n@@ -1,3 +1,5 @@\n+#!/usr/bin/env python3\n+\n import json\n from pathlib import Path\n import os\n", "issue": "PR stuck after approval and automated \"Update Documentation & Code Style\" commit\n**Describe the bug**\r\nSee https://github.com/deepset-ai/haystack/pull/2838\r\nThis PR has approval, all tests passed and is ready to merge. However due to the \"Update Documentation & Code Style\" commit, the required tests have not been executed and it's stuck. Manually executing the Test workflow on this branch does not help: see https://github.com/deepset-ai/haystack/actions/runs/2692003395.\r\n\r\nAll I can do, is make a minimal change to trigger the Test workflow correctly. 
That's cumbersome.\r\n\r\n**Expected behavior**\r\nBeing able to merge after \"Update Documentation & Code Style\" commit when all tests of the previous commit are green or maybe easier: being able to manually trigger the workflow to run all required tests for merging.\r\n\r\n**To Reproduce**\r\n- Create PR with docs change\r\n- Wait till workflows pass correctly and someone approves\n", "before_files": [{"content": "import json\nfrom pathlib import Path\nimport os\nimport sys\nimport shutil\n\nsys.path.append(\".\")\nfrom rest_api.utils import get_openapi_specs, get_app, get_pipelines # pylint: disable=wrong-import-position\nfrom haystack import __version__ # pylint: disable=wrong-import-position\n\nREST_PATH = Path(\"./rest_api\").absolute()\nPIPELINE_PATH = str(REST_PATH / \"pipeline\" / \"pipeline_empty.haystack-pipeline.yml\")\nAPP_PATH = str(REST_PATH / \"application.py\")\nDOCS_PATH = Path(\"./docs\") / \"_src\" / \"api\" / \"openapi\"\n\nos.environ[\"PIPELINE_YAML_PATH\"] = PIPELINE_PATH\n\nprint(f\"Loading OpenAPI specs from {APP_PATH} with pipeline at {PIPELINE_PATH}\")\n\n# To initialize the app and the pipelines\nget_app()\nget_pipelines()\n\n# Generate the openapi specs\nspecs = get_openapi_specs()\n\n# Dump the specs into a JSON file\nwith open(DOCS_PATH / \"openapi.json\", \"w\") as f:\n json.dump(specs, f, indent=4)\n\n# Remove rc versions of the specs from the folder\nfor specs_file in os.listdir():\n if os.path.isfile(specs_file) and \"rc\" in specs_file and Path(specs_file).suffix == \".json\":\n os.remove(specs_file)\n\n# Add versioned copy\nshutil.copy(DOCS_PATH / \"openapi.json\", DOCS_PATH / f\"openapi-{__version__}.json\")\n", "path": ".github/utils/generate_openapi_specs.py"}, {"content": "import sys\nimport logging\nfrom pathlib import Path\n\nlogging.basicConfig(level=logging.INFO)\n\n\nsys.path.append(\".\")\nfrom haystack.nodes._json_schema import update_json_schema\n\nupdate_json_schema(destination_path=Path(__file__).parent.parent.parent / \"haystack\" / \"json-schemas\")\n", "path": ".github/utils/generate_json_schema.py"}, {"content": "import re\n\nfrom nbconvert import MarkdownExporter\nimport os\nfrom pathlib import Path\n\nheaders = {\n 1: \"\"\"<!---\ntitle: \"Tutorial 1\"\nmetaTitle: \"Build Your First QA System\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial1\"\ndate: \"2020-09-03\"\nid: \"tutorial1md\"\n--->\"\"\",\n 2: \"\"\"<!---\ntitle: \"Tutorial 2\"\nmetaTitle: \"Fine-tuning a model on your own data\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial2\"\ndate: \"2020-09-03\"\nid: \"tutorial2md\"\n--->\"\"\",\n 3: \"\"\"<!---\ntitle: \"Tutorial 3\"\nmetaTitle: \"Build a QA System Without Elasticsearch\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial3\"\ndate: \"2020-09-03\"\nid: \"tutorial3md\"\n--->\"\"\",\n 4: \"\"\"<!---\ntitle: \"Tutorial 4\"\nmetaTitle: \"Utilizing existing FAQs for Question Answering\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial4\"\ndate: \"2020-09-03\"\nid: \"tutorial4md\"\n--->\"\"\",\n 5: \"\"\"<!---\ntitle: \"Tutorial 5\"\nmetaTitle: \"Evaluation of a QA System\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial5\"\ndate: \"2020-09-03\"\nid: \"tutorial5md\"\n--->\"\"\",\n 6: \"\"\"<!---\ntitle: \"Tutorial 6\"\nmetaTitle: \"Better retrieval via Dense Passage Retrieval\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial6\"\ndate: \"2020-09-03\"\nid: \"tutorial6md\"\n--->\"\"\",\n 7: \"\"\"<!---\ntitle: \"Tutorial 7\"\nmetaTitle: \"Generative QA with RAG\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial7\"\ndate: 
\"2020-11-12\"\nid: \"tutorial7md\"\n--->\"\"\",\n 8: \"\"\"<!---\ntitle: \"Tutorial 8\"\nmetaTitle: \"Preprocessing\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial8\"\ndate: \"2021-01-08\"\nid: \"tutorial8md\"\n--->\"\"\",\n 9: \"\"\"<!---\ntitle: \"Tutorial 9\"\nmetaTitle: \"Training a Dense Passage Retrieval model\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial9\"\ndate: \"2021-01-08\"\nid: \"tutorial9md\"\n--->\"\"\",\n 10: \"\"\"<!---\ntitle: \"Tutorial 10\"\nmetaTitle: \"Knowledge Graph QA\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial10\"\ndate: \"2021-04-06\"\nid: \"tutorial10md\"\n--->\"\"\",\n 11: \"\"\"<!---\ntitle: \"Tutorial 11\"\nmetaTitle: \"Pipelines\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial11\"\ndate: \"2021-04-06\"\nid: \"tutorial11md\"\n--->\"\"\",\n 12: \"\"\"<!---\ntitle: \"Tutorial 12\"\nmetaTitle: \"Generative QA with LFQA\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial12\"\ndate: \"2021-04-06\"\nid: \"tutorial12md\"\n--->\"\"\",\n 13: \"\"\"<!---\ntitle: \"Tutorial 13\"\nmetaTitle: \"Question Generation\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial13\"\ndate: \"2021-08-23\"\nid: \"tutorial13md\"\n--->\"\"\",\n 14: \"\"\"<!---\ntitle: \"Tutorial 14\"\nmetaTitle: \"Query Classifier Tutorial\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial14\"\ndate: \"2021-08-23\"\nid: \"tutorial14md\"\n--->\"\"\",\n 15: \"\"\"<!---\ntitle: \"Tutorial 15\"\nmetaTitle: \"TableQA Tutorial\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial15\"\ndate: \"2021-10-28\"\nid: \"tutorial15md\"\n--->\"\"\",\n 16: \"\"\"<!---\ntitle: \"Tutorial 16\"\nmetaTitle: \"DocumentClassifier at Index Time Tutorial\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial16\"\ndate: \"2021-11-05\"\nid: \"tutorial16md\"\n--->\"\"\",\n 17: \"\"\"<!---\ntitle: \"Tutorial 17\"\nmetaTitle: \"Audio Tutorial\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial17\"\ndate: \"2022-06-15\"\nid: \"tutorial17md\"\n--->\"\"\",\n 18: \"\"\"<!---\ntitle: \"Tutorial 18\" \nmetaTitle: \"GPL Domain Adaptation\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial18\"\ndate: \"2022-06-22\"\nid: \"tutorial18md\"\n--->\"\"\",\n}\n\n\ndef atoi(text):\n return int(text) if text.isdigit() else text\n\n\ndef natural_keys(text):\n test = [atoi(c) for c in re.split(\"(\\d+)\", text)]\n return test\n\n\ndir = Path(__file__).parent.parent.parent / \"tutorials\"\n\nnotebooks = [x for x in os.listdir(dir) if x[-6:] == \".ipynb\"]\n# sort notebooks based on numbers within name of notebook\nnotebooks = sorted(notebooks, key=lambda x: natural_keys(x))\n\n\ne = MarkdownExporter(exclude_output=True)\nfor i, nb in enumerate(notebooks):\n body, resources = e.from_filename(dir / nb)\n print(f\"Processing {dir}/{nb}\")\n\n tutorials_path = Path(__file__).parent.parent.parent / \"docs\" / \"_src\" / \"tutorials\" / \"tutorials\"\n with open(tutorials_path / f\"{i + 1}.md\", \"w\", encoding=\"utf-8\") as f:\n try:\n f.write(headers[i + 1] + \"\\n\\n\")\n except IndexError as e:\n raise IndexError(\n \"Can't find the header for this tutorial. 
Have you added it in '.github/utils/convert_notebooks_into_webpages.py'?\"\n )\n f.write(body)\n", "path": ".github/utils/convert_notebooks_into_webpages.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport json\nfrom pathlib import Path\nimport os\nimport sys\nimport shutil\n\nsys.path.append(\".\")\nfrom rest_api.utils import get_openapi_specs, get_app, get_pipelines # pylint: disable=wrong-import-position\nfrom haystack import __version__ # pylint: disable=wrong-import-position\n\nREST_PATH = Path(\"./rest_api\").absolute()\nPIPELINE_PATH = str(REST_PATH / \"pipeline\" / \"pipeline_empty.haystack-pipeline.yml\")\nAPP_PATH = str(REST_PATH / \"application.py\")\nDOCS_PATH = Path(\"./docs\") / \"_src\" / \"api\" / \"openapi\"\n\nos.environ[\"PIPELINE_YAML_PATH\"] = PIPELINE_PATH\n\nprint(f\"Loading OpenAPI specs from {APP_PATH} with pipeline at {PIPELINE_PATH}\")\n\n# To initialize the app and the pipelines\nget_app()\nget_pipelines()\n\n# Generate the openapi specs\nspecs = get_openapi_specs()\n\n# Dump the specs into a JSON file\nwith open(DOCS_PATH / \"openapi.json\", \"w\") as f:\n json.dump(specs, f, indent=4)\n\n# Remove rc versions of the specs from the folder\nfor specs_file in os.listdir():\n if os.path.isfile(specs_file) and \"rc\" in specs_file and Path(specs_file).suffix == \".json\":\n os.remove(specs_file)\n\n# Add versioned copy\nshutil.copy(DOCS_PATH / \"openapi.json\", DOCS_PATH / f\"openapi-{__version__}.json\")\n", "path": ".github/utils/generate_openapi_specs.py"}, {"content": "#!/usr/bin/env python3\n\nimport sys\nimport logging\nfrom pathlib import Path\n\nlogging.basicConfig(level=logging.INFO)\n\n\nsys.path.append(\".\")\nfrom haystack.nodes._json_schema import update_json_schema\n\nupdate_json_schema(destination_path=Path(__file__).parent.parent.parent / \"haystack\" / \"json-schemas\")\n", "path": ".github/utils/generate_json_schema.py"}, {"content": "#!/usr/bin/env python3\n\nimport re\n\nfrom nbconvert import MarkdownExporter\nimport os\nfrom pathlib import Path\n\nheaders = {\n 1: \"\"\"<!---\ntitle: \"Tutorial 1\"\nmetaTitle: \"Build Your First QA System\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial1\"\ndate: \"2020-09-03\"\nid: \"tutorial1md\"\n--->\"\"\",\n 2: \"\"\"<!---\ntitle: \"Tutorial 2\"\nmetaTitle: \"Fine-tuning a model on your own data\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial2\"\ndate: \"2020-09-03\"\nid: \"tutorial2md\"\n--->\"\"\",\n 3: \"\"\"<!---\ntitle: \"Tutorial 3\"\nmetaTitle: \"Build a QA System Without Elasticsearch\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial3\"\ndate: \"2020-09-03\"\nid: \"tutorial3md\"\n--->\"\"\",\n 4: \"\"\"<!---\ntitle: \"Tutorial 4\"\nmetaTitle: \"Utilizing existing FAQs for Question Answering\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial4\"\ndate: \"2020-09-03\"\nid: \"tutorial4md\"\n--->\"\"\",\n 5: \"\"\"<!---\ntitle: \"Tutorial 5\"\nmetaTitle: \"Evaluation of a QA System\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial5\"\ndate: \"2020-09-03\"\nid: \"tutorial5md\"\n--->\"\"\",\n 6: \"\"\"<!---\ntitle: \"Tutorial 6\"\nmetaTitle: \"Better retrieval via Dense Passage Retrieval\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial6\"\ndate: \"2020-09-03\"\nid: \"tutorial6md\"\n--->\"\"\",\n 7: \"\"\"<!---\ntitle: \"Tutorial 7\"\nmetaTitle: \"Generative QA with RAG\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial7\"\ndate: \"2020-11-12\"\nid: \"tutorial7md\"\n--->\"\"\",\n 8: \"\"\"<!---\ntitle: \"Tutorial 8\"\nmetaTitle: \"Preprocessing\"\nmetaDescription: \"\"\nslug: 
\"/docs/tutorial8\"\ndate: \"2021-01-08\"\nid: \"tutorial8md\"\n--->\"\"\",\n 9: \"\"\"<!---\ntitle: \"Tutorial 9\"\nmetaTitle: \"Training a Dense Passage Retrieval model\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial9\"\ndate: \"2021-01-08\"\nid: \"tutorial9md\"\n--->\"\"\",\n 10: \"\"\"<!---\ntitle: \"Tutorial 10\"\nmetaTitle: \"Knowledge Graph QA\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial10\"\ndate: \"2021-04-06\"\nid: \"tutorial10md\"\n--->\"\"\",\n 11: \"\"\"<!---\ntitle: \"Tutorial 11\"\nmetaTitle: \"Pipelines\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial11\"\ndate: \"2021-04-06\"\nid: \"tutorial11md\"\n--->\"\"\",\n 12: \"\"\"<!---\ntitle: \"Tutorial 12\"\nmetaTitle: \"Generative QA with LFQA\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial12\"\ndate: \"2021-04-06\"\nid: \"tutorial12md\"\n--->\"\"\",\n 13: \"\"\"<!---\ntitle: \"Tutorial 13\"\nmetaTitle: \"Question Generation\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial13\"\ndate: \"2021-08-23\"\nid: \"tutorial13md\"\n--->\"\"\",\n 14: \"\"\"<!---\ntitle: \"Tutorial 14\"\nmetaTitle: \"Query Classifier Tutorial\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial14\"\ndate: \"2021-08-23\"\nid: \"tutorial14md\"\n--->\"\"\",\n 15: \"\"\"<!---\ntitle: \"Tutorial 15\"\nmetaTitle: \"TableQA Tutorial\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial15\"\ndate: \"2021-10-28\"\nid: \"tutorial15md\"\n--->\"\"\",\n 16: \"\"\"<!---\ntitle: \"Tutorial 16\"\nmetaTitle: \"DocumentClassifier at Index Time Tutorial\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial16\"\ndate: \"2021-11-05\"\nid: \"tutorial16md\"\n--->\"\"\",\n 17: \"\"\"<!---\ntitle: \"Tutorial 17\"\nmetaTitle: \"Audio Tutorial\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial17\"\ndate: \"2022-06-15\"\nid: \"tutorial17md\"\n--->\"\"\",\n 18: \"\"\"<!---\ntitle: \"Tutorial 18\"\nmetaTitle: \"GPL Domain Adaptation\"\nmetaDescription: \"\"\nslug: \"/docs/tutorial18\"\ndate: \"2022-06-22\"\nid: \"tutorial18md\"\n--->\"\"\",\n}\n\n\ndef atoi(text):\n return int(text) if text.isdigit() else text\n\n\ndef natural_keys(text):\n test = [atoi(c) for c in re.split(\"(\\d+)\", text)]\n return test\n\n\ndir = Path(__file__).parent.parent.parent / \"tutorials\"\n\nnotebooks = [x for x in os.listdir(dir) if x[-6:] == \".ipynb\"]\n# sort notebooks based on numbers within name of notebook\nnotebooks = sorted(notebooks, key=lambda x: natural_keys(x))\n\n\ne = MarkdownExporter(exclude_output=True)\nfor i, nb in enumerate(notebooks):\n body, resources = e.from_filename(dir / nb)\n print(f\"Processing {dir}/{nb}\")\n\n tutorials_path = Path(__file__).parent.parent.parent / \"docs\" / \"_src\" / \"tutorials\" / \"tutorials\"\n with open(tutorials_path / f\"{i + 1}.md\", \"w\", encoding=\"utf-8\") as f:\n try:\n f.write(headers[i + 1] + \"\\n\\n\")\n except IndexError as e:\n raise IndexError(\n \"Can't find the header for this tutorial. Have you added it in '.github/utils/convert_notebooks_into_webpages.py'?\"\n )\n f.write(body)\n", "path": ".github/utils/convert_notebooks_into_webpages.py"}]} | 2,803 | 368 |
gh_patches_debug_16283 | rasdani/github-patches | git_diff | open-mmlab__mmdetection-6279 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG]bug in ConvFCBBoxHead's init cfg.
https://github.com/open-mmlab/mmdetection/blob/c88509cb9a73d6bd1edcba64eb924d3cf3cfe85d/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py#L103
This line will override initializers for fc_cls and fc_reg because they are also nn.Linear.
Or is that what's intended? But I see the old way to initialize fc_cls and fc_reg was to use Normal.
--- END ISSUE ---
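To make the override concrete, here is a minimal plain-PyTorch sketch of why a blanket `layer='Linear'` rule also reaches the prediction heads. It does not use mmcv's initializer machinery, and the module names and sizes are made up for illustration:
```python
import torch.nn as nn


class TinyHead(nn.Module):
    """Stand-in for ConvFCBBoxHead: shared fcs plus cls/reg heads, all nn.Linear."""

    def __init__(self):
        super().__init__()
        self.shared_fcs = nn.ModuleList([nn.Linear(16, 16) for _ in range(2)])
        self.fc_cls = nn.Linear(16, 4)   # meant to get Normal(std=0.01)
        self.fc_reg = nn.Linear(16, 8)   # meant to get Normal(std=0.001)


def init_by_layer_type(model):
    # A blanket rule akin to dict(type='Xavier', layer='Linear', ...):
    # it matches every nn.Linear, including fc_cls and fc_reg, so it
    # overwrites whatever head-specific init ran before it.
    for name, module in model.named_modules():
        if isinstance(module, nn.Linear):
            nn.init.xavier_uniform_(module.weight)
            print(f"Xavier applied to {name}")


head = TinyHead()
init_by_layer_type(head)
# Prints shared_fcs.0, shared_fcs.1, fc_cls, fc_reg -> the heads are hit too.
```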
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py`
Content:
```
1 # Copyright (c) OpenMMLab. All rights reserved.
2 import torch.nn as nn
3 from mmcv.cnn import ConvModule
4
5 from mmdet.models.builder import HEADS
6 from mmdet.models.utils import build_linear_layer
7 from .bbox_head import BBoxHead
8
9
10 @HEADS.register_module()
11 class ConvFCBBoxHead(BBoxHead):
12 r"""More general bbox head, with shared conv and fc layers and two optional
13 separated branches.
14
15 .. code-block:: none
16
17 /-> cls convs -> cls fcs -> cls
18 shared convs -> shared fcs
19 \-> reg convs -> reg fcs -> reg
20 """ # noqa: W605
21
22 def __init__(self,
23 num_shared_convs=0,
24 num_shared_fcs=0,
25 num_cls_convs=0,
26 num_cls_fcs=0,
27 num_reg_convs=0,
28 num_reg_fcs=0,
29 conv_out_channels=256,
30 fc_out_channels=1024,
31 conv_cfg=None,
32 norm_cfg=None,
33 init_cfg=None,
34 *args,
35 **kwargs):
36 super(ConvFCBBoxHead, self).__init__(
37 *args, init_cfg=init_cfg, **kwargs)
38 assert (num_shared_convs + num_shared_fcs + num_cls_convs +
39 num_cls_fcs + num_reg_convs + num_reg_fcs > 0)
40 if num_cls_convs > 0 or num_reg_convs > 0:
41 assert num_shared_fcs == 0
42 if not self.with_cls:
43 assert num_cls_convs == 0 and num_cls_fcs == 0
44 if not self.with_reg:
45 assert num_reg_convs == 0 and num_reg_fcs == 0
46 self.num_shared_convs = num_shared_convs
47 self.num_shared_fcs = num_shared_fcs
48 self.num_cls_convs = num_cls_convs
49 self.num_cls_fcs = num_cls_fcs
50 self.num_reg_convs = num_reg_convs
51 self.num_reg_fcs = num_reg_fcs
52 self.conv_out_channels = conv_out_channels
53 self.fc_out_channels = fc_out_channels
54 self.conv_cfg = conv_cfg
55 self.norm_cfg = norm_cfg
56
57 # add shared convs and fcs
58 self.shared_convs, self.shared_fcs, last_layer_dim = \
59 self._add_conv_fc_branch(
60 self.num_shared_convs, self.num_shared_fcs, self.in_channels,
61 True)
62 self.shared_out_channels = last_layer_dim
63
64 # add cls specific branch
65 self.cls_convs, self.cls_fcs, self.cls_last_dim = \
66 self._add_conv_fc_branch(
67 self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)
68
69 # add reg specific branch
70 self.reg_convs, self.reg_fcs, self.reg_last_dim = \
71 self._add_conv_fc_branch(
72 self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)
73
74 if self.num_shared_fcs == 0 and not self.with_avg_pool:
75 if self.num_cls_fcs == 0:
76 self.cls_last_dim *= self.roi_feat_area
77 if self.num_reg_fcs == 0:
78 self.reg_last_dim *= self.roi_feat_area
79
80 self.relu = nn.ReLU(inplace=True)
81 # reconstruct fc_cls and fc_reg since input channels are changed
82 if self.with_cls:
83 if self.custom_cls_channels:
84 cls_channels = self.loss_cls.get_cls_channels(self.num_classes)
85 else:
86 cls_channels = self.num_classes + 1
87 self.fc_cls = build_linear_layer(
88 self.cls_predictor_cfg,
89 in_features=self.cls_last_dim,
90 out_features=cls_channels)
91 if self.with_reg:
92 out_dim_reg = (4 if self.reg_class_agnostic else 4 *
93 self.num_classes)
94 self.fc_reg = build_linear_layer(
95 self.reg_predictor_cfg,
96 in_features=self.reg_last_dim,
97 out_features=out_dim_reg)
98
99 if init_cfg is None:
100 self.init_cfg += [
101 dict(
102 type='Xavier',
103 layer='Linear',
104 override=[
105 dict(name='shared_fcs'),
106 dict(name='cls_fcs'),
107 dict(name='reg_fcs')
108 ])
109 ]
110
111 def _add_conv_fc_branch(self,
112 num_branch_convs,
113 num_branch_fcs,
114 in_channels,
115 is_shared=False):
116 """Add shared or separable branch.
117
118 convs -> avg pool (optional) -> fcs
119 """
120 last_layer_dim = in_channels
121 # add branch specific conv layers
122 branch_convs = nn.ModuleList()
123 if num_branch_convs > 0:
124 for i in range(num_branch_convs):
125 conv_in_channels = (
126 last_layer_dim if i == 0 else self.conv_out_channels)
127 branch_convs.append(
128 ConvModule(
129 conv_in_channels,
130 self.conv_out_channels,
131 3,
132 padding=1,
133 conv_cfg=self.conv_cfg,
134 norm_cfg=self.norm_cfg))
135 last_layer_dim = self.conv_out_channels
136 # add branch specific fc layers
137 branch_fcs = nn.ModuleList()
138 if num_branch_fcs > 0:
139 # for shared branch, only consider self.with_avg_pool
140 # for separated branches, also consider self.num_shared_fcs
141 if (is_shared
142 or self.num_shared_fcs == 0) and not self.with_avg_pool:
143 last_layer_dim *= self.roi_feat_area
144 for i in range(num_branch_fcs):
145 fc_in_channels = (
146 last_layer_dim if i == 0 else self.fc_out_channels)
147 branch_fcs.append(
148 nn.Linear(fc_in_channels, self.fc_out_channels))
149 last_layer_dim = self.fc_out_channels
150 return branch_convs, branch_fcs, last_layer_dim
151
152 def forward(self, x):
153 # shared part
154 if self.num_shared_convs > 0:
155 for conv in self.shared_convs:
156 x = conv(x)
157
158 if self.num_shared_fcs > 0:
159 if self.with_avg_pool:
160 x = self.avg_pool(x)
161
162 x = x.flatten(1)
163
164 for fc in self.shared_fcs:
165 x = self.relu(fc(x))
166 # separate branches
167 x_cls = x
168 x_reg = x
169
170 for conv in self.cls_convs:
171 x_cls = conv(x_cls)
172 if x_cls.dim() > 2:
173 if self.with_avg_pool:
174 x_cls = self.avg_pool(x_cls)
175 x_cls = x_cls.flatten(1)
176 for fc in self.cls_fcs:
177 x_cls = self.relu(fc(x_cls))
178
179 for conv in self.reg_convs:
180 x_reg = conv(x_reg)
181 if x_reg.dim() > 2:
182 if self.with_avg_pool:
183 x_reg = self.avg_pool(x_reg)
184 x_reg = x_reg.flatten(1)
185 for fc in self.reg_fcs:
186 x_reg = self.relu(fc(x_reg))
187
188 cls_score = self.fc_cls(x_cls) if self.with_cls else None
189 bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
190 return cls_score, bbox_pred
191
192
193 @HEADS.register_module()
194 class Shared2FCBBoxHead(ConvFCBBoxHead):
195
196 def __init__(self, fc_out_channels=1024, *args, **kwargs):
197 super(Shared2FCBBoxHead, self).__init__(
198 num_shared_convs=0,
199 num_shared_fcs=2,
200 num_cls_convs=0,
201 num_cls_fcs=0,
202 num_reg_convs=0,
203 num_reg_fcs=0,
204 fc_out_channels=fc_out_channels,
205 *args,
206 **kwargs)
207
208
209 @HEADS.register_module()
210 class Shared4Conv1FCBBoxHead(ConvFCBBoxHead):
211
212 def __init__(self, fc_out_channels=1024, *args, **kwargs):
213 super(Shared4Conv1FCBBoxHead, self).__init__(
214 num_shared_convs=4,
215 num_shared_fcs=1,
216 num_cls_convs=0,
217 num_cls_fcs=0,
218 num_reg_convs=0,
219 num_reg_fcs=0,
220 fc_out_channels=fc_out_channels,
221 *args,
222 **kwargs)
223
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py b/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py
--- a/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py
+++ b/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py
@@ -97,10 +97,16 @@
out_features=out_dim_reg)
if init_cfg is None:
+ # when init_cfg is None,
+ # It has been set to
+ # [[dict(type='Normal', std=0.01, override=dict(name='fc_cls'))],
+ # [dict(type='Normal', std=0.001, override=dict(name='fc_reg'))]
+ # after `super(ConvFCBBoxHead, self).__init__()`
+ # we only need to append additional configuration
+ # for `shared_fcs`, `cls_fcs` and `reg_fcs`
self.init_cfg += [
dict(
type='Xavier',
- layer='Linear',
override=[
dict(name='shared_fcs'),
dict(name='cls_fcs'),
| {"golden_diff": "diff --git a/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py b/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py\n--- a/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py\n+++ b/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py\n@@ -97,10 +97,16 @@\n out_features=out_dim_reg)\n \n if init_cfg is None:\n+ # when init_cfg is None,\n+ # It has been set to\n+ # [[dict(type='Normal', std=0.01, override=dict(name='fc_cls'))],\n+ # [dict(type='Normal', std=0.001, override=dict(name='fc_reg'))]\n+ # after `super(ConvFCBBoxHead, self).__init__()`\n+ # we only need to append additional configuration\n+ # for `shared_fcs`, `cls_fcs` and `reg_fcs`\n self.init_cfg += [\n dict(\n type='Xavier',\n- layer='Linear',\n override=[\n dict(name='shared_fcs'),\n dict(name='cls_fcs'),\n", "issue": "[BUG]bug in ConvFCBBoxHead's init cfg.\nhttps://github.com/open-mmlab/mmdetection/blob/c88509cb9a73d6bd1edcba64eb924d3cf3cfe85d/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py#L103\r\n\r\nThis line will override initializers for fc_cls and fc_reg because they are also nn.Linear. \r\nOr is it what's intended? But I see the old way to initialize fc_cls and fc_reg is using Normal.\n", "before_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\n\nfrom mmdet.models.builder import HEADS\nfrom mmdet.models.utils import build_linear_layer\nfrom .bbox_head import BBoxHead\n\n\[email protected]_module()\nclass ConvFCBBoxHead(BBoxHead):\n r\"\"\"More general bbox head, with shared conv and fc layers and two optional\n separated branches.\n\n .. code-block:: none\n\n /-> cls convs -> cls fcs -> cls\n shared convs -> shared fcs\n \\-> reg convs -> reg fcs -> reg\n \"\"\" # noqa: W605\n\n def __init__(self,\n num_shared_convs=0,\n num_shared_fcs=0,\n num_cls_convs=0,\n num_cls_fcs=0,\n num_reg_convs=0,\n num_reg_fcs=0,\n conv_out_channels=256,\n fc_out_channels=1024,\n conv_cfg=None,\n norm_cfg=None,\n init_cfg=None,\n *args,\n **kwargs):\n super(ConvFCBBoxHead, self).__init__(\n *args, init_cfg=init_cfg, **kwargs)\n assert (num_shared_convs + num_shared_fcs + num_cls_convs +\n num_cls_fcs + num_reg_convs + num_reg_fcs > 0)\n if num_cls_convs > 0 or num_reg_convs > 0:\n assert num_shared_fcs == 0\n if not self.with_cls:\n assert num_cls_convs == 0 and num_cls_fcs == 0\n if not self.with_reg:\n assert num_reg_convs == 0 and num_reg_fcs == 0\n self.num_shared_convs = num_shared_convs\n self.num_shared_fcs = num_shared_fcs\n self.num_cls_convs = num_cls_convs\n self.num_cls_fcs = num_cls_fcs\n self.num_reg_convs = num_reg_convs\n self.num_reg_fcs = num_reg_fcs\n self.conv_out_channels = conv_out_channels\n self.fc_out_channels = fc_out_channels\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n\n # add shared convs and fcs\n self.shared_convs, self.shared_fcs, last_layer_dim = \\\n self._add_conv_fc_branch(\n self.num_shared_convs, self.num_shared_fcs, self.in_channels,\n True)\n self.shared_out_channels = last_layer_dim\n\n # add cls specific branch\n self.cls_convs, self.cls_fcs, self.cls_last_dim = \\\n self._add_conv_fc_branch(\n self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)\n\n # add reg specific branch\n self.reg_convs, self.reg_fcs, self.reg_last_dim = \\\n self._add_conv_fc_branch(\n self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)\n\n if self.num_shared_fcs == 0 and not self.with_avg_pool:\n if self.num_cls_fcs == 0:\n self.cls_last_dim *= self.roi_feat_area\n if 
self.num_reg_fcs == 0:\n self.reg_last_dim *= self.roi_feat_area\n\n self.relu = nn.ReLU(inplace=True)\n # reconstruct fc_cls and fc_reg since input channels are changed\n if self.with_cls:\n if self.custom_cls_channels:\n cls_channels = self.loss_cls.get_cls_channels(self.num_classes)\n else:\n cls_channels = self.num_classes + 1\n self.fc_cls = build_linear_layer(\n self.cls_predictor_cfg,\n in_features=self.cls_last_dim,\n out_features=cls_channels)\n if self.with_reg:\n out_dim_reg = (4 if self.reg_class_agnostic else 4 *\n self.num_classes)\n self.fc_reg = build_linear_layer(\n self.reg_predictor_cfg,\n in_features=self.reg_last_dim,\n out_features=out_dim_reg)\n\n if init_cfg is None:\n self.init_cfg += [\n dict(\n type='Xavier',\n layer='Linear',\n override=[\n dict(name='shared_fcs'),\n dict(name='cls_fcs'),\n dict(name='reg_fcs')\n ])\n ]\n\n def _add_conv_fc_branch(self,\n num_branch_convs,\n num_branch_fcs,\n in_channels,\n is_shared=False):\n \"\"\"Add shared or separable branch.\n\n convs -> avg pool (optional) -> fcs\n \"\"\"\n last_layer_dim = in_channels\n # add branch specific conv layers\n branch_convs = nn.ModuleList()\n if num_branch_convs > 0:\n for i in range(num_branch_convs):\n conv_in_channels = (\n last_layer_dim if i == 0 else self.conv_out_channels)\n branch_convs.append(\n ConvModule(\n conv_in_channels,\n self.conv_out_channels,\n 3,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n last_layer_dim = self.conv_out_channels\n # add branch specific fc layers\n branch_fcs = nn.ModuleList()\n if num_branch_fcs > 0:\n # for shared branch, only consider self.with_avg_pool\n # for separated branches, also consider self.num_shared_fcs\n if (is_shared\n or self.num_shared_fcs == 0) and not self.with_avg_pool:\n last_layer_dim *= self.roi_feat_area\n for i in range(num_branch_fcs):\n fc_in_channels = (\n last_layer_dim if i == 0 else self.fc_out_channels)\n branch_fcs.append(\n nn.Linear(fc_in_channels, self.fc_out_channels))\n last_layer_dim = self.fc_out_channels\n return branch_convs, branch_fcs, last_layer_dim\n\n def forward(self, x):\n # shared part\n if self.num_shared_convs > 0:\n for conv in self.shared_convs:\n x = conv(x)\n\n if self.num_shared_fcs > 0:\n if self.with_avg_pool:\n x = self.avg_pool(x)\n\n x = x.flatten(1)\n\n for fc in self.shared_fcs:\n x = self.relu(fc(x))\n # separate branches\n x_cls = x\n x_reg = x\n\n for conv in self.cls_convs:\n x_cls = conv(x_cls)\n if x_cls.dim() > 2:\n if self.with_avg_pool:\n x_cls = self.avg_pool(x_cls)\n x_cls = x_cls.flatten(1)\n for fc in self.cls_fcs:\n x_cls = self.relu(fc(x_cls))\n\n for conv in self.reg_convs:\n x_reg = conv(x_reg)\n if x_reg.dim() > 2:\n if self.with_avg_pool:\n x_reg = self.avg_pool(x_reg)\n x_reg = x_reg.flatten(1)\n for fc in self.reg_fcs:\n x_reg = self.relu(fc(x_reg))\n\n cls_score = self.fc_cls(x_cls) if self.with_cls else None\n bbox_pred = self.fc_reg(x_reg) if self.with_reg else None\n return cls_score, bbox_pred\n\n\[email protected]_module()\nclass Shared2FCBBoxHead(ConvFCBBoxHead):\n\n def __init__(self, fc_out_channels=1024, *args, **kwargs):\n super(Shared2FCBBoxHead, self).__init__(\n num_shared_convs=0,\n num_shared_fcs=2,\n num_cls_convs=0,\n num_cls_fcs=0,\n num_reg_convs=0,\n num_reg_fcs=0,\n fc_out_channels=fc_out_channels,\n *args,\n **kwargs)\n\n\[email protected]_module()\nclass Shared4Conv1FCBBoxHead(ConvFCBBoxHead):\n\n def __init__(self, fc_out_channels=1024, *args, **kwargs):\n super(Shared4Conv1FCBBoxHead, self).__init__(\n num_shared_convs=4,\n 
num_shared_fcs=1,\n num_cls_convs=0,\n num_cls_fcs=0,\n num_reg_convs=0,\n num_reg_fcs=0,\n fc_out_channels=fc_out_channels,\n *args,\n **kwargs)\n", "path": "mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py"}], "after_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\n\nfrom mmdet.models.builder import HEADS\nfrom mmdet.models.utils import build_linear_layer\nfrom .bbox_head import BBoxHead\n\n\[email protected]_module()\nclass ConvFCBBoxHead(BBoxHead):\n r\"\"\"More general bbox head, with shared conv and fc layers and two optional\n separated branches.\n\n .. code-block:: none\n\n /-> cls convs -> cls fcs -> cls\n shared convs -> shared fcs\n \\-> reg convs -> reg fcs -> reg\n \"\"\" # noqa: W605\n\n def __init__(self,\n num_shared_convs=0,\n num_shared_fcs=0,\n num_cls_convs=0,\n num_cls_fcs=0,\n num_reg_convs=0,\n num_reg_fcs=0,\n conv_out_channels=256,\n fc_out_channels=1024,\n conv_cfg=None,\n norm_cfg=None,\n init_cfg=None,\n *args,\n **kwargs):\n super(ConvFCBBoxHead, self).__init__(\n *args, init_cfg=init_cfg, **kwargs)\n assert (num_shared_convs + num_shared_fcs + num_cls_convs +\n num_cls_fcs + num_reg_convs + num_reg_fcs > 0)\n if num_cls_convs > 0 or num_reg_convs > 0:\n assert num_shared_fcs == 0\n if not self.with_cls:\n assert num_cls_convs == 0 and num_cls_fcs == 0\n if not self.with_reg:\n assert num_reg_convs == 0 and num_reg_fcs == 0\n self.num_shared_convs = num_shared_convs\n self.num_shared_fcs = num_shared_fcs\n self.num_cls_convs = num_cls_convs\n self.num_cls_fcs = num_cls_fcs\n self.num_reg_convs = num_reg_convs\n self.num_reg_fcs = num_reg_fcs\n self.conv_out_channels = conv_out_channels\n self.fc_out_channels = fc_out_channels\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n\n # add shared convs and fcs\n self.shared_convs, self.shared_fcs, last_layer_dim = \\\n self._add_conv_fc_branch(\n self.num_shared_convs, self.num_shared_fcs, self.in_channels,\n True)\n self.shared_out_channels = last_layer_dim\n\n # add cls specific branch\n self.cls_convs, self.cls_fcs, self.cls_last_dim = \\\n self._add_conv_fc_branch(\n self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)\n\n # add reg specific branch\n self.reg_convs, self.reg_fcs, self.reg_last_dim = \\\n self._add_conv_fc_branch(\n self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)\n\n if self.num_shared_fcs == 0 and not self.with_avg_pool:\n if self.num_cls_fcs == 0:\n self.cls_last_dim *= self.roi_feat_area\n if self.num_reg_fcs == 0:\n self.reg_last_dim *= self.roi_feat_area\n\n self.relu = nn.ReLU(inplace=True)\n # reconstruct fc_cls and fc_reg since input channels are changed\n if self.with_cls:\n if self.custom_cls_channels:\n cls_channels = self.loss_cls.get_cls_channels(self.num_classes)\n else:\n cls_channels = self.num_classes + 1\n self.fc_cls = build_linear_layer(\n self.cls_predictor_cfg,\n in_features=self.cls_last_dim,\n out_features=cls_channels)\n if self.with_reg:\n out_dim_reg = (4 if self.reg_class_agnostic else 4 *\n self.num_classes)\n self.fc_reg = build_linear_layer(\n self.reg_predictor_cfg,\n in_features=self.reg_last_dim,\n out_features=out_dim_reg)\n\n if init_cfg is None:\n # when init_cfg is None,\n # It has been set to\n # [[dict(type='Normal', std=0.01, override=dict(name='fc_cls'))],\n # [dict(type='Normal', std=0.001, override=dict(name='fc_reg'))]\n # after `super(ConvFCBBoxHead, self).__init__()`\n # we only need to append additional configuration\n # for 
`shared_fcs`, `cls_fcs` and `reg_fcs`\n self.init_cfg += [\n dict(\n type='Xavier',\n override=[\n dict(name='shared_fcs'),\n dict(name='cls_fcs'),\n dict(name='reg_fcs')\n ])\n ]\n\n def _add_conv_fc_branch(self,\n num_branch_convs,\n num_branch_fcs,\n in_channels,\n is_shared=False):\n \"\"\"Add shared or separable branch.\n\n convs -> avg pool (optional) -> fcs\n \"\"\"\n last_layer_dim = in_channels\n # add branch specific conv layers\n branch_convs = nn.ModuleList()\n if num_branch_convs > 0:\n for i in range(num_branch_convs):\n conv_in_channels = (\n last_layer_dim if i == 0 else self.conv_out_channels)\n branch_convs.append(\n ConvModule(\n conv_in_channels,\n self.conv_out_channels,\n 3,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n last_layer_dim = self.conv_out_channels\n # add branch specific fc layers\n branch_fcs = nn.ModuleList()\n if num_branch_fcs > 0:\n # for shared branch, only consider self.with_avg_pool\n # for separated branches, also consider self.num_shared_fcs\n if (is_shared\n or self.num_shared_fcs == 0) and not self.with_avg_pool:\n last_layer_dim *= self.roi_feat_area\n for i in range(num_branch_fcs):\n fc_in_channels = (\n last_layer_dim if i == 0 else self.fc_out_channels)\n branch_fcs.append(\n nn.Linear(fc_in_channels, self.fc_out_channels))\n last_layer_dim = self.fc_out_channels\n return branch_convs, branch_fcs, last_layer_dim\n\n def forward(self, x):\n # shared part\n if self.num_shared_convs > 0:\n for conv in self.shared_convs:\n x = conv(x)\n\n if self.num_shared_fcs > 0:\n if self.with_avg_pool:\n x = self.avg_pool(x)\n\n x = x.flatten(1)\n\n for fc in self.shared_fcs:\n x = self.relu(fc(x))\n # separate branches\n x_cls = x\n x_reg = x\n\n for conv in self.cls_convs:\n x_cls = conv(x_cls)\n if x_cls.dim() > 2:\n if self.with_avg_pool:\n x_cls = self.avg_pool(x_cls)\n x_cls = x_cls.flatten(1)\n for fc in self.cls_fcs:\n x_cls = self.relu(fc(x_cls))\n\n for conv in self.reg_convs:\n x_reg = conv(x_reg)\n if x_reg.dim() > 2:\n if self.with_avg_pool:\n x_reg = self.avg_pool(x_reg)\n x_reg = x_reg.flatten(1)\n for fc in self.reg_fcs:\n x_reg = self.relu(fc(x_reg))\n\n cls_score = self.fc_cls(x_cls) if self.with_cls else None\n bbox_pred = self.fc_reg(x_reg) if self.with_reg else None\n return cls_score, bbox_pred\n\n\[email protected]_module()\nclass Shared2FCBBoxHead(ConvFCBBoxHead):\n\n def __init__(self, fc_out_channels=1024, *args, **kwargs):\n super(Shared2FCBBoxHead, self).__init__(\n num_shared_convs=0,\n num_shared_fcs=2,\n num_cls_convs=0,\n num_cls_fcs=0,\n num_reg_convs=0,\n num_reg_fcs=0,\n fc_out_channels=fc_out_channels,\n *args,\n **kwargs)\n\n\[email protected]_module()\nclass Shared4Conv1FCBBoxHead(ConvFCBBoxHead):\n\n def __init__(self, fc_out_channels=1024, *args, **kwargs):\n super(Shared4Conv1FCBBoxHead, self).__init__(\n num_shared_convs=4,\n num_shared_fcs=1,\n num_cls_convs=0,\n num_cls_fcs=0,\n num_reg_convs=0,\n num_reg_fcs=0,\n fc_out_channels=fc_out_channels,\n *args,\n **kwargs)\n", "path": "mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py"}]} | 2,790 | 261 |
gh_patches_debug_6341 | rasdani/github-patches | git_diff | Kinto__kinto-2214 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Straighten out commit_veto situation
In https://github.com/Kinto/kinto-signer/issues/591, we discovered that failing requests do not necessarily abort the transaction. (Exceptions do, but non-exceptional responses with e.g. 503 status codes don't.) There seems to be some confusion about this throughout the codebase; for example see https://github.com/Kinto/kinto-signer/blob/master/kinto_signer/listeners.py#L23-L24. At some point it must have seemed like Pyramid's [commit_veto hook](https://docs.pylonsproject.org/projects/pyramid_tm/en/latest/#adding-a-commit-veto-hook) would solve this automatically for us, but actually rereading this I see that "By default, `pyramid_tm` does not configure a commit veto into the system; you must do it explicitly."
However, the docstring for our [notify_resource_events_before](https://github.com/Kinto/kinto/blob/master/kinto/core/events.py#L152) *claims* to be a commit veto hook, muddying the waters somewhat. This code was added in https://github.com/Kinto/kinto/pull/1263, when we discovered that running "before resource changed" events as a "before commit" hook was invalid and we had to figure out a better approach.
- It seems like at a very minimum, the docstring here is invalid. I believe this is not really a veto but rather just part of the events machinery.
- Should there be a `commit_veto` hook? Personally I would find it surprising that 4xx/5xx responses always automatically abort the transaction, even if they were not driven by exceptions.
--- END ISSUE ---
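For the second bullet above, a minimal sketch of what explicitly opting in to a commit veto could look like is below. This is not code that Kinto ships; the `tm.commit_veto` setting and the status-code convention follow the pyramid_tm documentation linked in the issue, and the module path used in the wiring is hypothetical:
```python
def simple_commit_veto(request, response):
    """Return True to abort the transaction for 4xx/5xx responses.

    Roughly mirrors the behaviour described in the pyramid_tm docs: an
    'x-tm' response header, if present, wins over the status-code check.
    """
    xtm = response.headers.get('x-tm')
    if xtm is not None:
        return xtm != 'commit'
    return response.status.startswith(('4', '5'))


def includeme(config):
    # Hypothetical wiring: pyramid_tm does not configure a veto by default,
    # so it has to be pointed at one explicitly via a dotted name.
    config.add_settings({'tm.commit_veto': 'kinto.core.events.simple_commit_veto'})
```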
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/core/events.py`
Content:
```
1 import logging
2 import warnings
3 from collections import OrderedDict
4
5 import transaction
6 from pyramid.events import NewRequest
7 import pyramid.tweens
8 from enum import Enum
9
10 from kinto.core.utils import strip_uri_prefix
11
12
13 logger = logging.getLogger(__name__)
14
15
16 class ACTIONS(Enum):
17 CREATE = "create"
18 DELETE = "delete"
19 READ = "read"
20 UPDATE = "update"
21
22 @staticmethod
23 def from_string_list(elements):
24 return tuple(ACTIONS(el) for el in elements)
25
26
27 class _ResourceEvent:
28 def __init__(self, payload, request):
29 self.payload = payload
30 self.request = request
31
32 def __repr__(self):
33 return f"<{self.__class__.__name__} action={self.payload['action']} uri={self.payload['uri']}>"
34
35 @property
36 def read_records(self):
37 message = "`read_records` is deprecated, use `read_objects` instead."
38 warnings.warn(message, DeprecationWarning)
39 return self.read_objects
40
41 @property
42 def impacted_records(self):
43 message = "`impacted_records` is deprecated, use `impacted_objects` instead."
44 warnings.warn(message, DeprecationWarning)
45 return self.impacted_objects
46
47
48 class ResourceRead(_ResourceEvent):
49 """Triggered when a resource is being read.
50 """
51
52 def __init__(self, payload, read_objects, request):
53 super().__init__(payload, request)
54 self.read_objects = read_objects
55
56
57 class ResourceChanged(_ResourceEvent):
58 """Triggered when a resource is being changed.
59 """
60
61 def __init__(self, payload, impacted_objects, request):
62 super().__init__(payload, request)
63 self.impacted_objects = impacted_objects
64
65
66 class AfterResourceRead(_ResourceEvent):
67 """Triggered after a resource was successfully read.
68 """
69
70 def __init__(self, payload, read_objects, request):
71 super().__init__(payload, request)
72 self.read_objects = read_objects
73
74
75 class AfterResourceChanged(_ResourceEvent):
76 """Triggered after a resource was successfully changed.
77 """
78
79 def __init__(self, payload, impacted_objects, request):
80 super().__init__(payload, request)
81 self.impacted_objects = impacted_objects
82
83
84 class EventCollector(object):
85 """A collection to gather events emitted over the course of a request.
86
87 Events are gathered by parent id, resource type, and event
88 type. This serves as a primitive normalization so that we can emit
89 fewer events.
90 """
91
92 def __init__(self):
93 self.event_dict = OrderedDict()
94 """The events as collected so far.
95
96 The key of the event_dict is a triple (resource_name,
97 parent_id, action). The value is a triple (impacted, request,
98 payload). If the same (resource_name, parent_id, action) is
99 encountered, we just extend the existing impacted with the new
100 impacted. N.B. this means all values in the payload must not
101 be specific to a single impacted_object. See
102 https://github.com/Kinto/kinto/issues/945 and
103 https://github.com/Kinto/kinto/issues/1731.
104 """
105
106 def add_event(self, resource_name, parent_id, action, payload, impacted, request):
107 key = (resource_name, parent_id, action)
108 if key not in self.event_dict:
109 value = (payload, impacted, request)
110 self.event_dict[key] = value
111 else:
112 old_value = self.event_dict[key]
113 (old_payload, old_impacted, old_request) = old_value
114 # May be a good idea to assert that old_payload == payload here.
115 self.event_dict[key] = (old_payload, old_impacted + impacted, old_request)
116
117 def drain(self):
118 """Return an iterator that removes elements from this EventCollector.
119
120 This can be used to process events while still allowing events
121 to be added (for instance, as part of a cascade where events
122 add other events).
123
124 Items yielded will be of a tuple suitable for using as
125 arguments to EventCollector.add_event.
126 """
127 return EventCollectorDrain(self)
128
129
130 class EventCollectorDrain(object):
131 """An iterator that drains an EventCollector.
132
133 Get one using EventCollector.drain()."""
134
135 def __init__(self, event_collector):
136 self.event_collector = event_collector
137
138 def __iter__(self):
139 return self
140
141 def __next__(self):
142 if self.event_collector.event_dict:
143 # Get the "first" key in insertion order, so as to process
144 # events in the same order they were queued.
145 key = next(iter(self.event_collector.event_dict.keys()))
146 value = self.event_collector.event_dict.pop(key)
147 return key + value
148 else:
149 raise StopIteration
150
151
152 def notify_resource_events_before(handler, registry):
153 """pyramid_tm "commit veto" hook to run ResourceChanged events.
154
155 This hook being a "commit veto" let us tell pyramid_tm to abort
156 the transaction if the ResourceChanged listeners raise.
157 """
158
159 def tween(request):
160 response = handler(request)
161 for event in request.get_resource_events():
162 request.registry.notify(event)
163
164 return response
165
166 return tween
167
168
169 def setup_transaction_hook(config):
170 """
171 Resource events are plugged with the transactions of ``pyramid_tm``.
172
173 Once a transaction is committed, ``AfterResourceRead`` and
174 ``AfterResourceChanged`` events are sent.
175 """
176
177 def _notify_resource_events_after(success, request):
178 """Notify the accumulated resource events if transaction succeeds.
179 """
180 if not success: # pragma: no cover
181 return
182
183 for event in request.get_resource_events(after_commit=True):
184 try:
185 request.registry.notify(event)
186 except Exception:
187 logger.error("Unable to notify", exc_info=True)
188
189 def on_new_request(event):
190 """When a new request comes in, hook on transaction commit.
191 """
192 # Since there is one transaction per batch, ignore subrequests.
193 if hasattr(event.request, "parent"):
194 return
195 current = transaction.get()
196 current.addAfterCommitHook(_notify_resource_events_after, args=(event.request,))
197
198 config.add_subscriber(on_new_request, NewRequest)
199 config.add_tween(
200 "kinto.core.events.notify_resource_events_before", under=pyramid.tweens.EXCVIEW
201 )
202
203
204 def get_resource_events(request, after_commit=False):
205 """Generator to iterate the list of events triggered on resources.
206
207 The list is sorted chronologically (see OrderedDict).
208
209 This drains the resource_events currently in the request, which
210 allows us to process new events as they are added by current
211 events. However, once the iteration is over, we merge all the
212 events we've emitted into a new resource_events, which we store on
213 the request so we can reprocess the same events in an after-commit
214 tween.
215
216 This generator must be completely consumed!
217 """
218 by_resource = request.bound_data.get("resource_events", EventCollector())
219 afterwards = EventCollector()
220
221 for event_call in by_resource.drain():
222 afterwards.add_event(*event_call)
223 (_, _, action, payload, impacted, request) = event_call
224
225 if after_commit:
226 if action == ACTIONS.READ:
227 event_cls = AfterResourceRead
228 else:
229 event_cls = AfterResourceChanged
230 else:
231 if action == ACTIONS.READ:
232 event_cls = ResourceRead
233 else:
234 event_cls = ResourceChanged
235
236 yield event_cls(payload, impacted, request)
237
238 request.bound_data["resource_events"] = afterwards
239
240
241 def notify_resource_event(
242 request, parent_id, timestamp, data, action, old=None, resource_name=None, resource_data=None
243 ):
244 """Request helper to stack a resource event.
245
246 If a similar event (same resource, same action) already occured during the
247 current transaction (e.g. batch) then just extend the impacted objects of
248 the previous one.
249
250 :param resource_name: The name of the resource on which the event
251 happened (taken from the request if not provided).
252 :param resource_data: Information about the resource on which the
253 event is being emitted. Usually contains information about how
254 to find this object in the hierarchy (for instance,
255 ``bucket_id`` and ``collection_id`` for a record). Taken from
256 the request matchdict if absent.
257 :type resource_data: dict
258
259 """
260 if action == ACTIONS.READ:
261 if not isinstance(data, list):
262 data = [data]
263 impacted = data
264 elif action == ACTIONS.CREATE:
265 impacted = [{"new": data}]
266 elif action == ACTIONS.DELETE:
267 if not isinstance(data, list):
268 impacted = [{"new": data, "old": old}]
269 else:
270 impacted = []
271 for i, new in enumerate(data):
272 impacted.append({"new": new, "old": old[i]})
273 else: # ACTIONS.UPDATE:
274 impacted = [{"new": data, "old": old}]
275
276 # Get previously triggered events.
277 events = request.bound_data.setdefault("resource_events", EventCollector())
278
279 resource_name = resource_name or request.current_resource_name
280 matchdict = resource_data or dict(request.matchdict)
281
282 payload = {
283 "timestamp": timestamp,
284 "action": action.value,
285 # Deprecated: don't actually use URI (see #945).
286 "uri": strip_uri_prefix(request.path),
287 "user_id": request.prefixed_userid,
288 "resource_name": resource_name,
289 }
290
291 # Deprecated: don't actually use `resource_name_id` either (see #945).
292 if "id" in request.matchdict:
293 matchdict[resource_name + "_id"] = matchdict.pop("id")
294
295 payload.update(**matchdict)
296
297 events.add_event(resource_name, parent_id, action, payload, impacted, request)
298
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/core/events.py b/kinto/core/events.py
--- a/kinto/core/events.py
+++ b/kinto/core/events.py
@@ -150,10 +150,12 @@
def notify_resource_events_before(handler, registry):
- """pyramid_tm "commit veto" hook to run ResourceChanged events.
+ """Tween that runs ResourceChanged events.
+
+ This tween runs under EXCVIEW, so exceptions raised by
+ ResourceChanged events are caught and dealt the same as any other
+ exceptions.
- This hook being a "commit veto" let us tell pyramid_tm to abort
- the transaction if the ResourceChanged listeners raise.
"""
def tween(request):
| {"golden_diff": "diff --git a/kinto/core/events.py b/kinto/core/events.py\n--- a/kinto/core/events.py\n+++ b/kinto/core/events.py\n@@ -150,10 +150,12 @@\n \n \n def notify_resource_events_before(handler, registry):\n- \"\"\"pyramid_tm \"commit veto\" hook to run ResourceChanged events.\n+ \"\"\"Tween that runs ResourceChanged events.\n+\n+ This tween runs under EXCVIEW, so exceptions raised by\n+ ResourceChanged events are caught and dealt the same as any other\n+ exceptions.\n \n- This hook being a \"commit veto\" let us tell pyramid_tm to abort\n- the transaction if the ResourceChanged listeners raise.\n \"\"\"\n \n def tween(request):\n", "issue": "Straighten out commit_veto situation\nIn https://github.com/Kinto/kinto-signer/issues/591, we discovered that failing requests do not necessarily abort the transaction. (Exceptions do, but non-exceptional responses with e.g. 503 status codes don't.) There seems to be some confusion about this throughout the codebase; for example see https://github.com/Kinto/kinto-signer/blob/master/kinto_signer/listeners.py#L23-L24. At some point it must have seemed like Pyramid's [commit_veto hook](https://docs.pylonsproject.org/projects/pyramid_tm/en/latest/#adding-a-commit-veto-hook) would solve this automatically for us, but actually rereading this I see that \"By default, `pyramid_tm` does not configure a commit veto into the system; you must do it explicitly.\"\r\n\r\nHowever, the docstring for our [notify_resource_events_before](https://github.com/Kinto/kinto/blob/master/kinto/core/events.py#L152) *claims* to be a commit veto hook, muddying the waters somewhat. This code was added in https://github.com/Kinto/kinto/pull/1263, when we discovered that running \"before resource changed\" events as a \"before commit\" hook was invalid and we had to figure out a better approach.\r\n\r\n- It seems like at a very minimum, the docstring here is invalid. I believe this is not really a veto but rather just part of the events machinery. \r\n- Should there be a `commit_veto` hook? 
Personally I would find it surprising that 4xx/5xx responses always automatically abort the transaction, even if they were not driven by exceptions.\r\n\n", "before_files": [{"content": "import logging\nimport warnings\nfrom collections import OrderedDict\n\nimport transaction\nfrom pyramid.events import NewRequest\nimport pyramid.tweens\nfrom enum import Enum\n\nfrom kinto.core.utils import strip_uri_prefix\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass ACTIONS(Enum):\n CREATE = \"create\"\n DELETE = \"delete\"\n READ = \"read\"\n UPDATE = \"update\"\n\n @staticmethod\n def from_string_list(elements):\n return tuple(ACTIONS(el) for el in elements)\n\n\nclass _ResourceEvent:\n def __init__(self, payload, request):\n self.payload = payload\n self.request = request\n\n def __repr__(self):\n return f\"<{self.__class__.__name__} action={self.payload['action']} uri={self.payload['uri']}>\"\n\n @property\n def read_records(self):\n message = \"`read_records` is deprecated, use `read_objects` instead.\"\n warnings.warn(message, DeprecationWarning)\n return self.read_objects\n\n @property\n def impacted_records(self):\n message = \"`impacted_records` is deprecated, use `impacted_objects` instead.\"\n warnings.warn(message, DeprecationWarning)\n return self.impacted_objects\n\n\nclass ResourceRead(_ResourceEvent):\n \"\"\"Triggered when a resource is being read.\n \"\"\"\n\n def __init__(self, payload, read_objects, request):\n super().__init__(payload, request)\n self.read_objects = read_objects\n\n\nclass ResourceChanged(_ResourceEvent):\n \"\"\"Triggered when a resource is being changed.\n \"\"\"\n\n def __init__(self, payload, impacted_objects, request):\n super().__init__(payload, request)\n self.impacted_objects = impacted_objects\n\n\nclass AfterResourceRead(_ResourceEvent):\n \"\"\"Triggered after a resource was successfully read.\n \"\"\"\n\n def __init__(self, payload, read_objects, request):\n super().__init__(payload, request)\n self.read_objects = read_objects\n\n\nclass AfterResourceChanged(_ResourceEvent):\n \"\"\"Triggered after a resource was successfully changed.\n \"\"\"\n\n def __init__(self, payload, impacted_objects, request):\n super().__init__(payload, request)\n self.impacted_objects = impacted_objects\n\n\nclass EventCollector(object):\n \"\"\"A collection to gather events emitted over the course of a request.\n\n Events are gathered by parent id, resource type, and event\n type. This serves as a primitive normalization so that we can emit\n fewer events.\n \"\"\"\n\n def __init__(self):\n self.event_dict = OrderedDict()\n \"\"\"The events as collected so far.\n\n The key of the event_dict is a triple (resource_name,\n parent_id, action). The value is a triple (impacted, request,\n payload). If the same (resource_name, parent_id, action) is\n encountered, we just extend the existing impacted with the new\n impacted. N.B. this means all values in the payload must not\n be specific to a single impacted_object. 
See\n https://github.com/Kinto/kinto/issues/945 and\n https://github.com/Kinto/kinto/issues/1731.\n \"\"\"\n\n def add_event(self, resource_name, parent_id, action, payload, impacted, request):\n key = (resource_name, parent_id, action)\n if key not in self.event_dict:\n value = (payload, impacted, request)\n self.event_dict[key] = value\n else:\n old_value = self.event_dict[key]\n (old_payload, old_impacted, old_request) = old_value\n # May be a good idea to assert that old_payload == payload here.\n self.event_dict[key] = (old_payload, old_impacted + impacted, old_request)\n\n def drain(self):\n \"\"\"Return an iterator that removes elements from this EventCollector.\n\n This can be used to process events while still allowing events\n to be added (for instance, as part of a cascade where events\n add other events).\n\n Items yielded will be of a tuple suitable for using as\n arguments to EventCollector.add_event.\n \"\"\"\n return EventCollectorDrain(self)\n\n\nclass EventCollectorDrain(object):\n \"\"\"An iterator that drains an EventCollector.\n\n Get one using EventCollector.drain().\"\"\"\n\n def __init__(self, event_collector):\n self.event_collector = event_collector\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.event_collector.event_dict:\n # Get the \"first\" key in insertion order, so as to process\n # events in the same order they were queued.\n key = next(iter(self.event_collector.event_dict.keys()))\n value = self.event_collector.event_dict.pop(key)\n return key + value\n else:\n raise StopIteration\n\n\ndef notify_resource_events_before(handler, registry):\n \"\"\"pyramid_tm \"commit veto\" hook to run ResourceChanged events.\n\n This hook being a \"commit veto\" let us tell pyramid_tm to abort\n the transaction if the ResourceChanged listeners raise.\n \"\"\"\n\n def tween(request):\n response = handler(request)\n for event in request.get_resource_events():\n request.registry.notify(event)\n\n return response\n\n return tween\n\n\ndef setup_transaction_hook(config):\n \"\"\"\n Resource events are plugged with the transactions of ``pyramid_tm``.\n\n Once a transaction is committed, ``AfterResourceRead`` and\n ``AfterResourceChanged`` events are sent.\n \"\"\"\n\n def _notify_resource_events_after(success, request):\n \"\"\"Notify the accumulated resource events if transaction succeeds.\n \"\"\"\n if not success: # pragma: no cover\n return\n\n for event in request.get_resource_events(after_commit=True):\n try:\n request.registry.notify(event)\n except Exception:\n logger.error(\"Unable to notify\", exc_info=True)\n\n def on_new_request(event):\n \"\"\"When a new request comes in, hook on transaction commit.\n \"\"\"\n # Since there is one transaction per batch, ignore subrequests.\n if hasattr(event.request, \"parent\"):\n return\n current = transaction.get()\n current.addAfterCommitHook(_notify_resource_events_after, args=(event.request,))\n\n config.add_subscriber(on_new_request, NewRequest)\n config.add_tween(\n \"kinto.core.events.notify_resource_events_before\", under=pyramid.tweens.EXCVIEW\n )\n\n\ndef get_resource_events(request, after_commit=False):\n \"\"\"Generator to iterate the list of events triggered on resources.\n\n The list is sorted chronologically (see OrderedDict).\n\n This drains the resource_events currently in the request, which\n allows us to process new events as they are added by current\n events. 
However, once the iteration is over, we merge all the\n events we've emitted into a new resource_events, which we store on\n the request so we can reprocess the same events in an after-commit\n tween.\n\n This generator must be completely consumed!\n \"\"\"\n by_resource = request.bound_data.get(\"resource_events\", EventCollector())\n afterwards = EventCollector()\n\n for event_call in by_resource.drain():\n afterwards.add_event(*event_call)\n (_, _, action, payload, impacted, request) = event_call\n\n if after_commit:\n if action == ACTIONS.READ:\n event_cls = AfterResourceRead\n else:\n event_cls = AfterResourceChanged\n else:\n if action == ACTIONS.READ:\n event_cls = ResourceRead\n else:\n event_cls = ResourceChanged\n\n yield event_cls(payload, impacted, request)\n\n request.bound_data[\"resource_events\"] = afterwards\n\n\ndef notify_resource_event(\n request, parent_id, timestamp, data, action, old=None, resource_name=None, resource_data=None\n):\n \"\"\"Request helper to stack a resource event.\n\n If a similar event (same resource, same action) already occured during the\n current transaction (e.g. batch) then just extend the impacted objects of\n the previous one.\n\n :param resource_name: The name of the resource on which the event\n happened (taken from the request if not provided).\n :param resource_data: Information about the resource on which the\n event is being emitted. Usually contains information about how\n to find this object in the hierarchy (for instance,\n ``bucket_id`` and ``collection_id`` for a record). Taken from\n the request matchdict if absent.\n :type resource_data: dict\n\n \"\"\"\n if action == ACTIONS.READ:\n if not isinstance(data, list):\n data = [data]\n impacted = data\n elif action == ACTIONS.CREATE:\n impacted = [{\"new\": data}]\n elif action == ACTIONS.DELETE:\n if not isinstance(data, list):\n impacted = [{\"new\": data, \"old\": old}]\n else:\n impacted = []\n for i, new in enumerate(data):\n impacted.append({\"new\": new, \"old\": old[i]})\n else: # ACTIONS.UPDATE:\n impacted = [{\"new\": data, \"old\": old}]\n\n # Get previously triggered events.\n events = request.bound_data.setdefault(\"resource_events\", EventCollector())\n\n resource_name = resource_name or request.current_resource_name\n matchdict = resource_data or dict(request.matchdict)\n\n payload = {\n \"timestamp\": timestamp,\n \"action\": action.value,\n # Deprecated: don't actually use URI (see #945).\n \"uri\": strip_uri_prefix(request.path),\n \"user_id\": request.prefixed_userid,\n \"resource_name\": resource_name,\n }\n\n # Deprecated: don't actually use `resource_name_id` either (see #945).\n if \"id\" in request.matchdict:\n matchdict[resource_name + \"_id\"] = matchdict.pop(\"id\")\n\n payload.update(**matchdict)\n\n events.add_event(resource_name, parent_id, action, payload, impacted, request)\n", "path": "kinto/core/events.py"}], "after_files": [{"content": "import logging\nimport warnings\nfrom collections import OrderedDict\n\nimport transaction\nfrom pyramid.events import NewRequest\nimport pyramid.tweens\nfrom enum import Enum\n\nfrom kinto.core.utils import strip_uri_prefix\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass ACTIONS(Enum):\n CREATE = \"create\"\n DELETE = \"delete\"\n READ = \"read\"\n UPDATE = \"update\"\n\n @staticmethod\n def from_string_list(elements):\n return tuple(ACTIONS(el) for el in elements)\n\n\nclass _ResourceEvent:\n def __init__(self, payload, request):\n self.payload = payload\n self.request = request\n\n def __repr__(self):\n 
return f\"<{self.__class__.__name__} action={self.payload['action']} uri={self.payload['uri']}>\"\n\n @property\n def read_records(self):\n message = \"`read_records` is deprecated, use `read_objects` instead.\"\n warnings.warn(message, DeprecationWarning)\n return self.read_objects\n\n @property\n def impacted_records(self):\n message = \"`impacted_records` is deprecated, use `impacted_objects` instead.\"\n warnings.warn(message, DeprecationWarning)\n return self.impacted_objects\n\n\nclass ResourceRead(_ResourceEvent):\n \"\"\"Triggered when a resource is being read.\n \"\"\"\n\n def __init__(self, payload, read_objects, request):\n super().__init__(payload, request)\n self.read_objects = read_objects\n\n\nclass ResourceChanged(_ResourceEvent):\n \"\"\"Triggered when a resource is being changed.\n \"\"\"\n\n def __init__(self, payload, impacted_objects, request):\n super().__init__(payload, request)\n self.impacted_objects = impacted_objects\n\n\nclass AfterResourceRead(_ResourceEvent):\n \"\"\"Triggered after a resource was successfully read.\n \"\"\"\n\n def __init__(self, payload, read_objects, request):\n super().__init__(payload, request)\n self.read_objects = read_objects\n\n\nclass AfterResourceChanged(_ResourceEvent):\n \"\"\"Triggered after a resource was successfully changed.\n \"\"\"\n\n def __init__(self, payload, impacted_objects, request):\n super().__init__(payload, request)\n self.impacted_objects = impacted_objects\n\n\nclass EventCollector(object):\n \"\"\"A collection to gather events emitted over the course of a request.\n\n Events are gathered by parent id, resource type, and event\n type. This serves as a primitive normalization so that we can emit\n fewer events.\n \"\"\"\n\n def __init__(self):\n self.event_dict = OrderedDict()\n \"\"\"The events as collected so far.\n\n The key of the event_dict is a triple (resource_name,\n parent_id, action). The value is a triple (impacted, request,\n payload). If the same (resource_name, parent_id, action) is\n encountered, we just extend the existing impacted with the new\n impacted. N.B. this means all values in the payload must not\n be specific to a single impacted_object. 
See\n https://github.com/Kinto/kinto/issues/945 and\n https://github.com/Kinto/kinto/issues/1731.\n \"\"\"\n\n def add_event(self, resource_name, parent_id, action, payload, impacted, request):\n key = (resource_name, parent_id, action)\n if key not in self.event_dict:\n value = (payload, impacted, request)\n self.event_dict[key] = value\n else:\n old_value = self.event_dict[key]\n (old_payload, old_impacted, old_request) = old_value\n # May be a good idea to assert that old_payload == payload here.\n self.event_dict[key] = (old_payload, old_impacted + impacted, old_request)\n\n def drain(self):\n \"\"\"Return an iterator that removes elements from this EventCollector.\n\n This can be used to process events while still allowing events\n to be added (for instance, as part of a cascade where events\n add other events).\n\n Items yielded will be of a tuple suitable for using as\n arguments to EventCollector.add_event.\n \"\"\"\n return EventCollectorDrain(self)\n\n\nclass EventCollectorDrain(object):\n \"\"\"An iterator that drains an EventCollector.\n\n Get one using EventCollector.drain().\"\"\"\n\n def __init__(self, event_collector):\n self.event_collector = event_collector\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.event_collector.event_dict:\n # Get the \"first\" key in insertion order, so as to process\n # events in the same order they were queued.\n key = next(iter(self.event_collector.event_dict.keys()))\n value = self.event_collector.event_dict.pop(key)\n return key + value\n else:\n raise StopIteration\n\n\ndef notify_resource_events_before(handler, registry):\n \"\"\"Tween that runs ResourceChanged events.\n\n This tween runs under EXCVIEW, so exceptions raised by\n ResourceChanged events are caught and dealt the same as any other\n exceptions.\n\n \"\"\"\n\n def tween(request):\n response = handler(request)\n for event in request.get_resource_events():\n request.registry.notify(event)\n\n return response\n\n return tween\n\n\ndef setup_transaction_hook(config):\n \"\"\"\n Resource events are plugged with the transactions of ``pyramid_tm``.\n\n Once a transaction is committed, ``AfterResourceRead`` and\n ``AfterResourceChanged`` events are sent.\n \"\"\"\n\n def _notify_resource_events_after(success, request):\n \"\"\"Notify the accumulated resource events if transaction succeeds.\n \"\"\"\n if not success: # pragma: no cover\n return\n\n for event in request.get_resource_events(after_commit=True):\n try:\n request.registry.notify(event)\n except Exception:\n logger.error(\"Unable to notify\", exc_info=True)\n\n def on_new_request(event):\n \"\"\"When a new request comes in, hook on transaction commit.\n \"\"\"\n # Since there is one transaction per batch, ignore subrequests.\n if hasattr(event.request, \"parent\"):\n return\n current = transaction.get()\n current.addAfterCommitHook(_notify_resource_events_after, args=(event.request,))\n\n config.add_subscriber(on_new_request, NewRequest)\n config.add_tween(\n \"kinto.core.events.notify_resource_events_before\", under=pyramid.tweens.EXCVIEW\n )\n\n\ndef get_resource_events(request, after_commit=False):\n \"\"\"Generator to iterate the list of events triggered on resources.\n\n The list is sorted chronologically (see OrderedDict).\n\n This drains the resource_events currently in the request, which\n allows us to process new events as they are added by current\n events. 
However, once the iteration is over, we merge all the\n events we've emitted into a new resource_events, which we store on\n the request so we can reprocess the same events in an after-commit\n tween.\n\n This generator must be completely consumed!\n \"\"\"\n by_resource = request.bound_data.get(\"resource_events\", EventCollector())\n afterwards = EventCollector()\n\n for event_call in by_resource.drain():\n afterwards.add_event(*event_call)\n (_, _, action, payload, impacted, request) = event_call\n\n if after_commit:\n if action == ACTIONS.READ:\n event_cls = AfterResourceRead\n else:\n event_cls = AfterResourceChanged\n else:\n if action == ACTIONS.READ:\n event_cls = ResourceRead\n else:\n event_cls = ResourceChanged\n\n yield event_cls(payload, impacted, request)\n\n request.bound_data[\"resource_events\"] = afterwards\n\n\ndef notify_resource_event(\n request, parent_id, timestamp, data, action, old=None, resource_name=None, resource_data=None\n):\n \"\"\"Request helper to stack a resource event.\n\n If a similar event (same resource, same action) already occured during the\n current transaction (e.g. batch) then just extend the impacted objects of\n the previous one.\n\n :param resource_name: The name of the resource on which the event\n happened (taken from the request if not provided).\n :param resource_data: Information about the resource on which the\n event is being emitted. Usually contains information about how\n to find this object in the hierarchy (for instance,\n ``bucket_id`` and ``collection_id`` for a record). Taken from\n the request matchdict if absent.\n :type resource_data: dict\n\n \"\"\"\n if action == ACTIONS.READ:\n if not isinstance(data, list):\n data = [data]\n impacted = data\n elif action == ACTIONS.CREATE:\n impacted = [{\"new\": data}]\n elif action == ACTIONS.DELETE:\n if not isinstance(data, list):\n impacted = [{\"new\": data, \"old\": old}]\n else:\n impacted = []\n for i, new in enumerate(data):\n impacted.append({\"new\": new, \"old\": old[i]})\n else: # ACTIONS.UPDATE:\n impacted = [{\"new\": data, \"old\": old}]\n\n # Get previously triggered events.\n events = request.bound_data.setdefault(\"resource_events\", EventCollector())\n\n resource_name = resource_name or request.current_resource_name\n matchdict = resource_data or dict(request.matchdict)\n\n payload = {\n \"timestamp\": timestamp,\n \"action\": action.value,\n # Deprecated: don't actually use URI (see #945).\n \"uri\": strip_uri_prefix(request.path),\n \"user_id\": request.prefixed_userid,\n \"resource_name\": resource_name,\n }\n\n # Deprecated: don't actually use `resource_name_id` either (see #945).\n if \"id\" in request.matchdict:\n matchdict[resource_name + \"_id\"] = matchdict.pop(\"id\")\n\n payload.update(**matchdict)\n\n events.add_event(resource_name, parent_id, action, payload, impacted, request)\n", "path": "kinto/core/events.py"}]} | 3,552 | 158 |
gh_patches_debug_31954 | rasdani/github-patches | git_diff | urllib3__urllib3-1704 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
contrib/_appengine_environ.py KeyError SERVER_SOFTWARE
https://github.com/urllib3/urllib3/blob/master/src/urllib3/contrib/_appengine_environ.py
In the development environment, the variable SERVER_SOFTWARE is sometimes not set, so the query should read as follows:
`os.environ.get('SERVER_SOFTWARE', '').startswith('Development')`
```
def is_local_appengine():
return ('APPENGINE_RUNTIME' in os.environ and
os.environ.get('SERVER_SOFTWARE', '').startswith('Development'))
def is_prod_appengine():
return ('APPENGINE_RUNTIME' in os.environ and
not os.environ.get('SERVER_SOFTWARE', '').startswith('Development') and
not is_prod_appengine_mvms())
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/urllib3/contrib/appengine.py`
Content:
```
1 """
2 This module provides a pool manager that uses Google App Engine's
3 `URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
4
5 Example usage::
6
7 from urllib3 import PoolManager
8 from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
9
10 if is_appengine_sandbox():
11 # AppEngineManager uses AppEngine's URLFetch API behind the scenes
12 http = AppEngineManager()
13 else:
14 # PoolManager uses a socket-level API behind the scenes
15 http = PoolManager()
16
17 r = http.request('GET', 'https://google.com/')
18
19 There are `limitations <https://cloud.google.com/appengine/docs/python/\
20 urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be
21 the best choice for your application. There are three options for using
22 urllib3 on Google App Engine:
23
24 1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is
25 cost-effective in many circumstances as long as your usage is within the
26 limitations.
27 2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets.
28 Sockets also have `limitations and restrictions
29 <https://cloud.google.com/appengine/docs/python/sockets/\
30 #limitations-and-restrictions>`_ and have a lower free quota than URLFetch.
31 To use sockets, be sure to specify the following in your ``app.yaml``::
32
33 env_variables:
34 GAE_USE_SOCKETS_HTTPLIB : 'true'
35
36 3. If you are using `App Engine Flexible
37 <https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard
38 :class:`PoolManager` without any configuration or special environment variables.
39 """
40
41 from __future__ import absolute_import
42 import io
43 import logging
44 import warnings
45 from ..packages.six.moves.urllib.parse import urljoin
46
47 from ..exceptions import (
48 HTTPError,
49 HTTPWarning,
50 MaxRetryError,
51 ProtocolError,
52 TimeoutError,
53 SSLError,
54 )
55
56 from ..request import RequestMethods
57 from ..response import HTTPResponse
58 from ..util.timeout import Timeout
59 from ..util.retry import Retry
60 from . import _appengine_environ
61
62 try:
63 from google.appengine.api import urlfetch
64 except ImportError:
65 urlfetch = None
66
67
68 log = logging.getLogger(__name__)
69
70
71 class AppEnginePlatformWarning(HTTPWarning):
72 pass
73
74
75 class AppEnginePlatformError(HTTPError):
76 pass
77
78
79 class AppEngineManager(RequestMethods):
80 """
81 Connection manager for Google App Engine sandbox applications.
82
83 This manager uses the URLFetch service directly instead of using the
84 emulated httplib, and is subject to URLFetch limitations as described in
85 the App Engine documentation `here
86 <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
87
88 Notably it will raise an :class:`AppEnginePlatformError` if:
89 * URLFetch is not available.
90 * If you attempt to use this on App Engine Flexible, as full socket
91 support is available.
92 * If a request size is more than 10 megabytes.
93 * If a response size is more than 32 megabtyes.
94 * If you use an unsupported request method such as OPTIONS.
95
96 Beyond those cases, it will raise normal urllib3 errors.
97 """
98
99 def __init__(
100 self,
101 headers=None,
102 retries=None,
103 validate_certificate=True,
104 urlfetch_retries=True,
105 ):
106 if not urlfetch:
107 raise AppEnginePlatformError(
108 "URLFetch is not available in this environment."
109 )
110
111 if is_prod_appengine_mvms():
112 raise AppEnginePlatformError(
113 "Use normal urllib3.PoolManager instead of AppEngineManager"
114 "on Managed VMs, as using URLFetch is not necessary in "
115 "this environment."
116 )
117
118 warnings.warn(
119 "urllib3 is using URLFetch on Google App Engine sandbox instead "
120 "of sockets. To use sockets directly instead of URLFetch see "
121 "https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.",
122 AppEnginePlatformWarning,
123 )
124
125 RequestMethods.__init__(self, headers)
126 self.validate_certificate = validate_certificate
127 self.urlfetch_retries = urlfetch_retries
128
129 self.retries = retries or Retry.DEFAULT
130
131 def __enter__(self):
132 return self
133
134 def __exit__(self, exc_type, exc_val, exc_tb):
135 # Return False to re-raise any potential exceptions
136 return False
137
138 def urlopen(
139 self,
140 method,
141 url,
142 body=None,
143 headers=None,
144 retries=None,
145 redirect=True,
146 timeout=Timeout.DEFAULT_TIMEOUT,
147 **response_kw
148 ):
149
150 retries = self._get_retries(retries, redirect)
151
152 try:
153 follow_redirects = redirect and retries.redirect != 0 and retries.total
154 response = urlfetch.fetch(
155 url,
156 payload=body,
157 method=method,
158 headers=headers or {},
159 allow_truncated=False,
160 follow_redirects=self.urlfetch_retries and follow_redirects,
161 deadline=self._get_absolute_timeout(timeout),
162 validate_certificate=self.validate_certificate,
163 )
164 except urlfetch.DeadlineExceededError as e:
165 raise TimeoutError(self, e)
166
167 except urlfetch.InvalidURLError as e:
168 if "too large" in str(e):
169 raise AppEnginePlatformError(
170 "URLFetch request too large, URLFetch only "
171 "supports requests up to 10mb in size.",
172 e,
173 )
174 raise ProtocolError(e)
175
176 except urlfetch.DownloadError as e:
177 if "Too many redirects" in str(e):
178 raise MaxRetryError(self, url, reason=e)
179 raise ProtocolError(e)
180
181 except urlfetch.ResponseTooLargeError as e:
182 raise AppEnginePlatformError(
183 "URLFetch response too large, URLFetch only supports"
184 "responses up to 32mb in size.",
185 e,
186 )
187
188 except urlfetch.SSLCertificateError as e:
189 raise SSLError(e)
190
191 except urlfetch.InvalidMethodError as e:
192 raise AppEnginePlatformError(
193 "URLFetch does not support method: %s" % method, e
194 )
195
196 http_response = self._urlfetch_response_to_http_response(
197 response, retries=retries, **response_kw
198 )
199
200 # Handle redirect?
201 redirect_location = redirect and http_response.get_redirect_location()
202 if redirect_location:
203 # Check for redirect response
204 if self.urlfetch_retries and retries.raise_on_redirect:
205 raise MaxRetryError(self, url, "too many redirects")
206 else:
207 if http_response.status == 303:
208 method = "GET"
209
210 try:
211 retries = retries.increment(
212 method, url, response=http_response, _pool=self
213 )
214 except MaxRetryError:
215 if retries.raise_on_redirect:
216 raise MaxRetryError(self, url, "too many redirects")
217 return http_response
218
219 retries.sleep_for_retry(http_response)
220 log.debug("Redirecting %s -> %s", url, redirect_location)
221 redirect_url = urljoin(url, redirect_location)
222 return self.urlopen(
223 method,
224 redirect_url,
225 body,
226 headers,
227 retries=retries,
228 redirect=redirect,
229 timeout=timeout,
230 **response_kw
231 )
232
233 # Check if we should retry the HTTP response.
234 has_retry_after = bool(http_response.getheader("Retry-After"))
235 if retries.is_retry(method, http_response.status, has_retry_after):
236 retries = retries.increment(method, url, response=http_response, _pool=self)
237 log.debug("Retry: %s", url)
238 retries.sleep(http_response)
239 return self.urlopen(
240 method,
241 url,
242 body=body,
243 headers=headers,
244 retries=retries,
245 redirect=redirect,
246 timeout=timeout,
247 **response_kw
248 )
249
250 return http_response
251
252 def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
253
254 if is_prod_appengine():
255 # Production GAE handles deflate encoding automatically, but does
256 # not remove the encoding header.
257 content_encoding = urlfetch_resp.headers.get("content-encoding")
258
259 if content_encoding == "deflate":
260 del urlfetch_resp.headers["content-encoding"]
261
262 transfer_encoding = urlfetch_resp.headers.get("transfer-encoding")
263 # We have a full response's content,
264 # so let's make sure we don't report ourselves as chunked data.
265 if transfer_encoding == "chunked":
266 encodings = transfer_encoding.split(",")
267 encodings.remove("chunked")
268 urlfetch_resp.headers["transfer-encoding"] = ",".join(encodings)
269
270 original_response = HTTPResponse(
271 # In order for decoding to work, we must present the content as
272 # a file-like object.
273 body=io.BytesIO(urlfetch_resp.content),
274 msg=urlfetch_resp.header_msg,
275 headers=urlfetch_resp.headers,
276 status=urlfetch_resp.status_code,
277 **response_kw
278 )
279
280 return HTTPResponse(
281 body=io.BytesIO(urlfetch_resp.content),
282 headers=urlfetch_resp.headers,
283 status=urlfetch_resp.status_code,
284 original_response=original_response,
285 **response_kw
286 )
287
288 def _get_absolute_timeout(self, timeout):
289 if timeout is Timeout.DEFAULT_TIMEOUT:
290 return None # Defer to URLFetch's default.
291 if isinstance(timeout, Timeout):
292 if timeout._read is not None or timeout._connect is not None:
293 warnings.warn(
294 "URLFetch does not support granular timeout settings, "
295 "reverting to total or default URLFetch timeout.",
296 AppEnginePlatformWarning,
297 )
298 return timeout.total
299 return timeout
300
301 def _get_retries(self, retries, redirect):
302 if not isinstance(retries, Retry):
303 retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
304
305 if retries.connect or retries.read or retries.redirect:
306 warnings.warn(
307 "URLFetch only supports total retries and does not "
308 "recognize connect, read, or redirect retry parameters.",
309 AppEnginePlatformWarning,
310 )
311
312 return retries
313
314
315 # Alias methods from _appengine_environ to maintain public API interface.
316
317 is_appengine = _appengine_environ.is_appengine
318 is_appengine_sandbox = _appengine_environ.is_appengine_sandbox
319 is_local_appengine = _appengine_environ.is_local_appengine
320 is_prod_appengine = _appengine_environ.is_prod_appengine
321 is_prod_appengine_mvms = _appengine_environ.is_prod_appengine_mvms
322
```
Path: `src/urllib3/contrib/_appengine_environ.py`
Content:
```
1 """
2 This module provides means to detect the App Engine environment.
3 """
4
5 import os
6
7
8 def is_appengine():
9 return is_local_appengine() or is_prod_appengine() or is_prod_appengine_mvms()
10
11
12 def is_appengine_sandbox():
13 return is_appengine() and not is_prod_appengine_mvms()
14
15
16 def is_local_appengine():
17 return (
18 "APPENGINE_RUNTIME" in os.environ
19 and "Development/" in os.environ["SERVER_SOFTWARE"]
20 )
21
22
23 def is_prod_appengine():
24 return (
25 "APPENGINE_RUNTIME" in os.environ
26 and "Google App Engine/" in os.environ["SERVER_SOFTWARE"]
27 and not is_prod_appengine_mvms()
28 )
29
30
31 def is_prod_appengine_mvms():
32 return os.environ.get("GAE_VM", False) == "true"
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/urllib3/contrib/_appengine_environ.py b/src/urllib3/contrib/_appengine_environ.py
--- a/src/urllib3/contrib/_appengine_environ.py
+++ b/src/urllib3/contrib/_appengine_environ.py
@@ -6,27 +6,31 @@
def is_appengine():
- return is_local_appengine() or is_prod_appengine() or is_prod_appengine_mvms()
+ return "APPENGINE_RUNTIME" in os.environ
def is_appengine_sandbox():
- return is_appengine() and not is_prod_appengine_mvms()
+ """Reports if the app is running in the first generation sandbox.
+
+ The second generation runtimes are technically still in a sandbox, but it
+ is much less restrictive, so generally you shouldn't need to check for it.
+ see https://cloud.google.com/appengine/docs/standard/runtimes
+ """
+ return is_appengine() and os.environ["APPENGINE_RUNTIME"] == "python27"
def is_local_appengine():
- return (
- "APPENGINE_RUNTIME" in os.environ
- and "Development/" in os.environ["SERVER_SOFTWARE"]
+ return is_appengine() and os.environ.get("SERVER_SOFTWARE", "").startswith(
+ "Development/"
)
def is_prod_appengine():
- return (
- "APPENGINE_RUNTIME" in os.environ
- and "Google App Engine/" in os.environ["SERVER_SOFTWARE"]
- and not is_prod_appengine_mvms()
+ return is_appengine() and os.environ.get("SERVER_SOFTWARE", "").startswith(
+ "Google App Engine/"
)
def is_prod_appengine_mvms():
- return os.environ.get("GAE_VM", False) == "true"
+ """Deprecated."""
+ return False
diff --git a/src/urllib3/contrib/appengine.py b/src/urllib3/contrib/appengine.py
--- a/src/urllib3/contrib/appengine.py
+++ b/src/urllib3/contrib/appengine.py
@@ -108,13 +108,6 @@
"URLFetch is not available in this environment."
)
- if is_prod_appengine_mvms():
- raise AppEnginePlatformError(
- "Use normal urllib3.PoolManager instead of AppEngineManager"
- "on Managed VMs, as using URLFetch is not necessary in "
- "this environment."
- )
-
warnings.warn(
"urllib3 is using URLFetch on Google App Engine sandbox instead "
"of sockets. To use sockets directly instead of URLFetch see "
| {"golden_diff": "diff --git a/src/urllib3/contrib/_appengine_environ.py b/src/urllib3/contrib/_appengine_environ.py\n--- a/src/urllib3/contrib/_appengine_environ.py\n+++ b/src/urllib3/contrib/_appengine_environ.py\n@@ -6,27 +6,31 @@\n \n \n def is_appengine():\n- return is_local_appengine() or is_prod_appengine() or is_prod_appengine_mvms()\n+ return \"APPENGINE_RUNTIME\" in os.environ\n \n \n def is_appengine_sandbox():\n- return is_appengine() and not is_prod_appengine_mvms()\n+ \"\"\"Reports if the app is running in the first generation sandbox.\n+\n+ The second generation runtimes are technically still in a sandbox, but it\n+ is much less restrictive, so generally you shouldn't need to check for it.\n+ see https://cloud.google.com/appengine/docs/standard/runtimes\n+ \"\"\"\n+ return is_appengine() and os.environ[\"APPENGINE_RUNTIME\"] == \"python27\"\n \n \n def is_local_appengine():\n- return (\n- \"APPENGINE_RUNTIME\" in os.environ\n- and \"Development/\" in os.environ[\"SERVER_SOFTWARE\"]\n+ return is_appengine() and os.environ.get(\"SERVER_SOFTWARE\", \"\").startswith(\n+ \"Development/\"\n )\n \n \n def is_prod_appengine():\n- return (\n- \"APPENGINE_RUNTIME\" in os.environ\n- and \"Google App Engine/\" in os.environ[\"SERVER_SOFTWARE\"]\n- and not is_prod_appengine_mvms()\n+ return is_appengine() and os.environ.get(\"SERVER_SOFTWARE\", \"\").startswith(\n+ \"Google App Engine/\"\n )\n \n \n def is_prod_appengine_mvms():\n- return os.environ.get(\"GAE_VM\", False) == \"true\"\n+ \"\"\"Deprecated.\"\"\"\n+ return False\ndiff --git a/src/urllib3/contrib/appengine.py b/src/urllib3/contrib/appengine.py\n--- a/src/urllib3/contrib/appengine.py\n+++ b/src/urllib3/contrib/appengine.py\n@@ -108,13 +108,6 @@\n \"URLFetch is not available in this environment.\"\n )\n \n- if is_prod_appengine_mvms():\n- raise AppEnginePlatformError(\n- \"Use normal urllib3.PoolManager instead of AppEngineManager\"\n- \"on Managed VMs, as using URLFetch is not necessary in \"\n- \"this environment.\"\n- )\n-\n warnings.warn(\n \"urllib3 is using URLFetch on Google App Engine sandbox instead \"\n \"of sockets. 
To use sockets directly instead of URLFetch see \"\n", "issue": "contrib/_appengine_environ.py KeyError SERVER_SOFTWARE\nhttps://github.com/urllib3/urllib3/blob/master/src/urllib3/contrib/_appengine_environ.py\r\n\r\nIn the development environment, the variable SERVER_SOFTWARE is sometimes not set, so the query should read as follows:\r\n\r\n`os.environ.get('SERVER_SOFTWARE', '').startswith('Development')`\r\n\r\n```\r\ndef is_local_appengine():\r\n return ('APPENGINE_RUNTIME' in os.environ and\r\n os.environ.get('SERVER_SOFTWARE', '').startswith('Development'))\r\n\r\n\r\ndef is_prod_appengine():\r\n return ('APPENGINE_RUNTIME' in os.environ and\r\n not os.environ.get('SERVER_SOFTWARE', '').startswith('Development') and\r\n not is_prod_appengine_mvms())\r\n```\n", "before_files": [{"content": "\"\"\"\nThis module provides a pool manager that uses Google App Engine's\n`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.\n\nExample usage::\n\n from urllib3 import PoolManager\n from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox\n\n if is_appengine_sandbox():\n # AppEngineManager uses AppEngine's URLFetch API behind the scenes\n http = AppEngineManager()\n else:\n # PoolManager uses a socket-level API behind the scenes\n http = PoolManager()\n\n r = http.request('GET', 'https://google.com/')\n\nThere are `limitations <https://cloud.google.com/appengine/docs/python/\\\nurlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be\nthe best choice for your application. There are three options for using\nurllib3 on Google App Engine:\n\n1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is\n cost-effective in many circumstances as long as your usage is within the\n limitations.\n2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets.\n Sockets also have `limitations and restrictions\n <https://cloud.google.com/appengine/docs/python/sockets/\\\n #limitations-and-restrictions>`_ and have a lower free quota than URLFetch.\n To use sockets, be sure to specify the following in your ``app.yaml``::\n\n env_variables:\n GAE_USE_SOCKETS_HTTPLIB : 'true'\n\n3. If you are using `App Engine Flexible\n<https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard\n:class:`PoolManager` without any configuration or special environment variables.\n\"\"\"\n\nfrom __future__ import absolute_import\nimport io\nimport logging\nimport warnings\nfrom ..packages.six.moves.urllib.parse import urljoin\n\nfrom ..exceptions import (\n HTTPError,\n HTTPWarning,\n MaxRetryError,\n ProtocolError,\n TimeoutError,\n SSLError,\n)\n\nfrom ..request import RequestMethods\nfrom ..response import HTTPResponse\nfrom ..util.timeout import Timeout\nfrom ..util.retry import Retry\nfrom . 
import _appengine_environ\n\ntry:\n from google.appengine.api import urlfetch\nexcept ImportError:\n urlfetch = None\n\n\nlog = logging.getLogger(__name__)\n\n\nclass AppEnginePlatformWarning(HTTPWarning):\n pass\n\n\nclass AppEnginePlatformError(HTTPError):\n pass\n\n\nclass AppEngineManager(RequestMethods):\n \"\"\"\n Connection manager for Google App Engine sandbox applications.\n\n This manager uses the URLFetch service directly instead of using the\n emulated httplib, and is subject to URLFetch limitations as described in\n the App Engine documentation `here\n <https://cloud.google.com/appengine/docs/python/urlfetch>`_.\n\n Notably it will raise an :class:`AppEnginePlatformError` if:\n * URLFetch is not available.\n * If you attempt to use this on App Engine Flexible, as full socket\n support is available.\n * If a request size is more than 10 megabytes.\n * If a response size is more than 32 megabtyes.\n * If you use an unsupported request method such as OPTIONS.\n\n Beyond those cases, it will raise normal urllib3 errors.\n \"\"\"\n\n def __init__(\n self,\n headers=None,\n retries=None,\n validate_certificate=True,\n urlfetch_retries=True,\n ):\n if not urlfetch:\n raise AppEnginePlatformError(\n \"URLFetch is not available in this environment.\"\n )\n\n if is_prod_appengine_mvms():\n raise AppEnginePlatformError(\n \"Use normal urllib3.PoolManager instead of AppEngineManager\"\n \"on Managed VMs, as using URLFetch is not necessary in \"\n \"this environment.\"\n )\n\n warnings.warn(\n \"urllib3 is using URLFetch on Google App Engine sandbox instead \"\n \"of sockets. To use sockets directly instead of URLFetch see \"\n \"https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.\",\n AppEnginePlatformWarning,\n )\n\n RequestMethods.__init__(self, headers)\n self.validate_certificate = validate_certificate\n self.urlfetch_retries = urlfetch_retries\n\n self.retries = retries or Retry.DEFAULT\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n # Return False to re-raise any potential exceptions\n return False\n\n def urlopen(\n self,\n method,\n url,\n body=None,\n headers=None,\n retries=None,\n redirect=True,\n timeout=Timeout.DEFAULT_TIMEOUT,\n **response_kw\n ):\n\n retries = self._get_retries(retries, redirect)\n\n try:\n follow_redirects = redirect and retries.redirect != 0 and retries.total\n response = urlfetch.fetch(\n url,\n payload=body,\n method=method,\n headers=headers or {},\n allow_truncated=False,\n follow_redirects=self.urlfetch_retries and follow_redirects,\n deadline=self._get_absolute_timeout(timeout),\n validate_certificate=self.validate_certificate,\n )\n except urlfetch.DeadlineExceededError as e:\n raise TimeoutError(self, e)\n\n except urlfetch.InvalidURLError as e:\n if \"too large\" in str(e):\n raise AppEnginePlatformError(\n \"URLFetch request too large, URLFetch only \"\n \"supports requests up to 10mb in size.\",\n e,\n )\n raise ProtocolError(e)\n\n except urlfetch.DownloadError as e:\n if \"Too many redirects\" in str(e):\n raise MaxRetryError(self, url, reason=e)\n raise ProtocolError(e)\n\n except urlfetch.ResponseTooLargeError as e:\n raise AppEnginePlatformError(\n \"URLFetch response too large, URLFetch only supports\"\n \"responses up to 32mb in size.\",\n e,\n )\n\n except urlfetch.SSLCertificateError as e:\n raise SSLError(e)\n\n except urlfetch.InvalidMethodError as e:\n raise AppEnginePlatformError(\n \"URLFetch does not support method: %s\" % method, e\n )\n\n http_response = 
self._urlfetch_response_to_http_response(\n response, retries=retries, **response_kw\n )\n\n # Handle redirect?\n redirect_location = redirect and http_response.get_redirect_location()\n if redirect_location:\n # Check for redirect response\n if self.urlfetch_retries and retries.raise_on_redirect:\n raise MaxRetryError(self, url, \"too many redirects\")\n else:\n if http_response.status == 303:\n method = \"GET\"\n\n try:\n retries = retries.increment(\n method, url, response=http_response, _pool=self\n )\n except MaxRetryError:\n if retries.raise_on_redirect:\n raise MaxRetryError(self, url, \"too many redirects\")\n return http_response\n\n retries.sleep_for_retry(http_response)\n log.debug(\"Redirecting %s -> %s\", url, redirect_location)\n redirect_url = urljoin(url, redirect_location)\n return self.urlopen(\n method,\n redirect_url,\n body,\n headers,\n retries=retries,\n redirect=redirect,\n timeout=timeout,\n **response_kw\n )\n\n # Check if we should retry the HTTP response.\n has_retry_after = bool(http_response.getheader(\"Retry-After\"))\n if retries.is_retry(method, http_response.status, has_retry_after):\n retries = retries.increment(method, url, response=http_response, _pool=self)\n log.debug(\"Retry: %s\", url)\n retries.sleep(http_response)\n return self.urlopen(\n method,\n url,\n body=body,\n headers=headers,\n retries=retries,\n redirect=redirect,\n timeout=timeout,\n **response_kw\n )\n\n return http_response\n\n def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):\n\n if is_prod_appengine():\n # Production GAE handles deflate encoding automatically, but does\n # not remove the encoding header.\n content_encoding = urlfetch_resp.headers.get(\"content-encoding\")\n\n if content_encoding == \"deflate\":\n del urlfetch_resp.headers[\"content-encoding\"]\n\n transfer_encoding = urlfetch_resp.headers.get(\"transfer-encoding\")\n # We have a full response's content,\n # so let's make sure we don't report ourselves as chunked data.\n if transfer_encoding == \"chunked\":\n encodings = transfer_encoding.split(\",\")\n encodings.remove(\"chunked\")\n urlfetch_resp.headers[\"transfer-encoding\"] = \",\".join(encodings)\n\n original_response = HTTPResponse(\n # In order for decoding to work, we must present the content as\n # a file-like object.\n body=io.BytesIO(urlfetch_resp.content),\n msg=urlfetch_resp.header_msg,\n headers=urlfetch_resp.headers,\n status=urlfetch_resp.status_code,\n **response_kw\n )\n\n return HTTPResponse(\n body=io.BytesIO(urlfetch_resp.content),\n headers=urlfetch_resp.headers,\n status=urlfetch_resp.status_code,\n original_response=original_response,\n **response_kw\n )\n\n def _get_absolute_timeout(self, timeout):\n if timeout is Timeout.DEFAULT_TIMEOUT:\n return None # Defer to URLFetch's default.\n if isinstance(timeout, Timeout):\n if timeout._read is not None or timeout._connect is not None:\n warnings.warn(\n \"URLFetch does not support granular timeout settings, \"\n \"reverting to total or default URLFetch timeout.\",\n AppEnginePlatformWarning,\n )\n return timeout.total\n return timeout\n\n def _get_retries(self, retries, redirect):\n if not isinstance(retries, Retry):\n retries = Retry.from_int(retries, redirect=redirect, default=self.retries)\n\n if retries.connect or retries.read or retries.redirect:\n warnings.warn(\n \"URLFetch only supports total retries and does not \"\n \"recognize connect, read, or redirect retry parameters.\",\n AppEnginePlatformWarning,\n )\n\n return retries\n\n\n# Alias methods from 
_appengine_environ to maintain public API interface.\n\nis_appengine = _appengine_environ.is_appengine\nis_appengine_sandbox = _appengine_environ.is_appengine_sandbox\nis_local_appengine = _appengine_environ.is_local_appengine\nis_prod_appengine = _appengine_environ.is_prod_appengine\nis_prod_appengine_mvms = _appengine_environ.is_prod_appengine_mvms\n", "path": "src/urllib3/contrib/appengine.py"}, {"content": "\"\"\"\nThis module provides means to detect the App Engine environment.\n\"\"\"\n\nimport os\n\n\ndef is_appengine():\n return is_local_appengine() or is_prod_appengine() or is_prod_appengine_mvms()\n\n\ndef is_appengine_sandbox():\n return is_appengine() and not is_prod_appengine_mvms()\n\n\ndef is_local_appengine():\n return (\n \"APPENGINE_RUNTIME\" in os.environ\n and \"Development/\" in os.environ[\"SERVER_SOFTWARE\"]\n )\n\n\ndef is_prod_appengine():\n return (\n \"APPENGINE_RUNTIME\" in os.environ\n and \"Google App Engine/\" in os.environ[\"SERVER_SOFTWARE\"]\n and not is_prod_appengine_mvms()\n )\n\n\ndef is_prod_appengine_mvms():\n return os.environ.get(\"GAE_VM\", False) == \"true\"\n", "path": "src/urllib3/contrib/_appengine_environ.py"}], "after_files": [{"content": "\"\"\"\nThis module provides a pool manager that uses Google App Engine's\n`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.\n\nExample usage::\n\n from urllib3 import PoolManager\n from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox\n\n if is_appengine_sandbox():\n # AppEngineManager uses AppEngine's URLFetch API behind the scenes\n http = AppEngineManager()\n else:\n # PoolManager uses a socket-level API behind the scenes\n http = PoolManager()\n\n r = http.request('GET', 'https://google.com/')\n\nThere are `limitations <https://cloud.google.com/appengine/docs/python/\\\nurlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be\nthe best choice for your application. There are three options for using\nurllib3 on Google App Engine:\n\n1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is\n cost-effective in many circumstances as long as your usage is within the\n limitations.\n2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets.\n Sockets also have `limitations and restrictions\n <https://cloud.google.com/appengine/docs/python/sockets/\\\n #limitations-and-restrictions>`_ and have a lower free quota than URLFetch.\n To use sockets, be sure to specify the following in your ``app.yaml``::\n\n env_variables:\n GAE_USE_SOCKETS_HTTPLIB : 'true'\n\n3. If you are using `App Engine Flexible\n<https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard\n:class:`PoolManager` without any configuration or special environment variables.\n\"\"\"\n\nfrom __future__ import absolute_import\nimport io\nimport logging\nimport warnings\nfrom ..packages.six.moves.urllib.parse import urljoin\n\nfrom ..exceptions import (\n HTTPError,\n HTTPWarning,\n MaxRetryError,\n ProtocolError,\n TimeoutError,\n SSLError,\n)\n\nfrom ..request import RequestMethods\nfrom ..response import HTTPResponse\nfrom ..util.timeout import Timeout\nfrom ..util.retry import Retry\nfrom . 
import _appengine_environ\n\ntry:\n from google.appengine.api import urlfetch\nexcept ImportError:\n urlfetch = None\n\n\nlog = logging.getLogger(__name__)\n\n\nclass AppEnginePlatformWarning(HTTPWarning):\n pass\n\n\nclass AppEnginePlatformError(HTTPError):\n pass\n\n\nclass AppEngineManager(RequestMethods):\n \"\"\"\n Connection manager for Google App Engine sandbox applications.\n\n This manager uses the URLFetch service directly instead of using the\n emulated httplib, and is subject to URLFetch limitations as described in\n the App Engine documentation `here\n <https://cloud.google.com/appengine/docs/python/urlfetch>`_.\n\n Notably it will raise an :class:`AppEnginePlatformError` if:\n * URLFetch is not available.\n * If you attempt to use this on App Engine Flexible, as full socket\n support is available.\n * If a request size is more than 10 megabytes.\n * If a response size is more than 32 megabtyes.\n * If you use an unsupported request method such as OPTIONS.\n\n Beyond those cases, it will raise normal urllib3 errors.\n \"\"\"\n\n def __init__(\n self,\n headers=None,\n retries=None,\n validate_certificate=True,\n urlfetch_retries=True,\n ):\n if not urlfetch:\n raise AppEnginePlatformError(\n \"URLFetch is not available in this environment.\"\n )\n\n warnings.warn(\n \"urllib3 is using URLFetch on Google App Engine sandbox instead \"\n \"of sockets. To use sockets directly instead of URLFetch see \"\n \"https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.\",\n AppEnginePlatformWarning,\n )\n\n RequestMethods.__init__(self, headers)\n self.validate_certificate = validate_certificate\n self.urlfetch_retries = urlfetch_retries\n\n self.retries = retries or Retry.DEFAULT\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n # Return False to re-raise any potential exceptions\n return False\n\n def urlopen(\n self,\n method,\n url,\n body=None,\n headers=None,\n retries=None,\n redirect=True,\n timeout=Timeout.DEFAULT_TIMEOUT,\n **response_kw\n ):\n\n retries = self._get_retries(retries, redirect)\n\n try:\n follow_redirects = redirect and retries.redirect != 0 and retries.total\n response = urlfetch.fetch(\n url,\n payload=body,\n method=method,\n headers=headers or {},\n allow_truncated=False,\n follow_redirects=self.urlfetch_retries and follow_redirects,\n deadline=self._get_absolute_timeout(timeout),\n validate_certificate=self.validate_certificate,\n )\n except urlfetch.DeadlineExceededError as e:\n raise TimeoutError(self, e)\n\n except urlfetch.InvalidURLError as e:\n if \"too large\" in str(e):\n raise AppEnginePlatformError(\n \"URLFetch request too large, URLFetch only \"\n \"supports requests up to 10mb in size.\",\n e,\n )\n raise ProtocolError(e)\n\n except urlfetch.DownloadError as e:\n if \"Too many redirects\" in str(e):\n raise MaxRetryError(self, url, reason=e)\n raise ProtocolError(e)\n\n except urlfetch.ResponseTooLargeError as e:\n raise AppEnginePlatformError(\n \"URLFetch response too large, URLFetch only supports\"\n \"responses up to 32mb in size.\",\n e,\n )\n\n except urlfetch.SSLCertificateError as e:\n raise SSLError(e)\n\n except urlfetch.InvalidMethodError as e:\n raise AppEnginePlatformError(\n \"URLFetch does not support method: %s\" % method, e\n )\n\n http_response = self._urlfetch_response_to_http_response(\n response, retries=retries, **response_kw\n )\n\n # Handle redirect?\n redirect_location = redirect and http_response.get_redirect_location()\n if redirect_location:\n # Check for 
redirect response\n if self.urlfetch_retries and retries.raise_on_redirect:\n raise MaxRetryError(self, url, \"too many redirects\")\n else:\n if http_response.status == 303:\n method = \"GET\"\n\n try:\n retries = retries.increment(\n method, url, response=http_response, _pool=self\n )\n except MaxRetryError:\n if retries.raise_on_redirect:\n raise MaxRetryError(self, url, \"too many redirects\")\n return http_response\n\n retries.sleep_for_retry(http_response)\n log.debug(\"Redirecting %s -> %s\", url, redirect_location)\n redirect_url = urljoin(url, redirect_location)\n return self.urlopen(\n method,\n redirect_url,\n body,\n headers,\n retries=retries,\n redirect=redirect,\n timeout=timeout,\n **response_kw\n )\n\n # Check if we should retry the HTTP response.\n has_retry_after = bool(http_response.getheader(\"Retry-After\"))\n if retries.is_retry(method, http_response.status, has_retry_after):\n retries = retries.increment(method, url, response=http_response, _pool=self)\n log.debug(\"Retry: %s\", url)\n retries.sleep(http_response)\n return self.urlopen(\n method,\n url,\n body=body,\n headers=headers,\n retries=retries,\n redirect=redirect,\n timeout=timeout,\n **response_kw\n )\n\n return http_response\n\n def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):\n\n if is_prod_appengine():\n # Production GAE handles deflate encoding automatically, but does\n # not remove the encoding header.\n content_encoding = urlfetch_resp.headers.get(\"content-encoding\")\n\n if content_encoding == \"deflate\":\n del urlfetch_resp.headers[\"content-encoding\"]\n\n transfer_encoding = urlfetch_resp.headers.get(\"transfer-encoding\")\n # We have a full response's content,\n # so let's make sure we don't report ourselves as chunked data.\n if transfer_encoding == \"chunked\":\n encodings = transfer_encoding.split(\",\")\n encodings.remove(\"chunked\")\n urlfetch_resp.headers[\"transfer-encoding\"] = \",\".join(encodings)\n\n original_response = HTTPResponse(\n # In order for decoding to work, we must present the content as\n # a file-like object.\n body=io.BytesIO(urlfetch_resp.content),\n msg=urlfetch_resp.header_msg,\n headers=urlfetch_resp.headers,\n status=urlfetch_resp.status_code,\n **response_kw\n )\n\n return HTTPResponse(\n body=io.BytesIO(urlfetch_resp.content),\n headers=urlfetch_resp.headers,\n status=urlfetch_resp.status_code,\n original_response=original_response,\n **response_kw\n )\n\n def _get_absolute_timeout(self, timeout):\n if timeout is Timeout.DEFAULT_TIMEOUT:\n return None # Defer to URLFetch's default.\n if isinstance(timeout, Timeout):\n if timeout._read is not None or timeout._connect is not None:\n warnings.warn(\n \"URLFetch does not support granular timeout settings, \"\n \"reverting to total or default URLFetch timeout.\",\n AppEnginePlatformWarning,\n )\n return timeout.total\n return timeout\n\n def _get_retries(self, retries, redirect):\n if not isinstance(retries, Retry):\n retries = Retry.from_int(retries, redirect=redirect, default=self.retries)\n\n if retries.connect or retries.read or retries.redirect:\n warnings.warn(\n \"URLFetch only supports total retries and does not \"\n \"recognize connect, read, or redirect retry parameters.\",\n AppEnginePlatformWarning,\n )\n\n return retries\n\n\n# Alias methods from _appengine_environ to maintain public API interface.\n\nis_appengine = _appengine_environ.is_appengine\nis_appengine_sandbox = _appengine_environ.is_appengine_sandbox\nis_local_appengine = 
_appengine_environ.is_local_appengine\nis_prod_appengine = _appengine_environ.is_prod_appengine\nis_prod_appengine_mvms = _appengine_environ.is_prod_appengine_mvms\n", "path": "src/urllib3/contrib/appengine.py"}, {"content": "\"\"\"\nThis module provides means to detect the App Engine environment.\n\"\"\"\n\nimport os\n\n\ndef is_appengine():\n return \"APPENGINE_RUNTIME\" in os.environ\n\n\ndef is_appengine_sandbox():\n \"\"\"Reports if the app is running in the first generation sandbox.\n\n The second generation runtimes are technically still in a sandbox, but it\n is much less restrictive, so generally you shouldn't need to check for it.\n see https://cloud.google.com/appengine/docs/standard/runtimes\n \"\"\"\n return is_appengine() and os.environ[\"APPENGINE_RUNTIME\"] == \"python27\"\n\n\ndef is_local_appengine():\n return is_appengine() and os.environ.get(\"SERVER_SOFTWARE\", \"\").startswith(\n \"Development/\"\n )\n\n\ndef is_prod_appengine():\n return is_appengine() and os.environ.get(\"SERVER_SOFTWARE\", \"\").startswith(\n \"Google App Engine/\"\n )\n\n\ndef is_prod_appengine_mvms():\n \"\"\"Deprecated.\"\"\"\n return False\n", "path": "src/urllib3/contrib/_appengine_environ.py"}]} | 3,842 | 583 |
gh_patches_debug_42181 | rasdani/github-patches | git_diff | conan-io__conan-2830 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MSBuild helper useEnv option causes missing include paths in some cases
When using the MSBuild helper for the first time, I received a number of errors about headers that could not be found. After spending significant time validating that the project works normally when running msbuild manually, I determined that the breaking flag was `/useEnv:True`.
I briefly looked at the implementation and it was not immediately obvious to me where the problem might be. I have worked around it by using the `get_command` method from the MSBuild helper, which returns a command where the default of `useEnv` is `False`.
It's mysterious why it causes this problem, but if everything is correct, then the requested change would be to simply expose `useEnv` as a parameter in the `build` method of the `MSBuild` helper rather than forcing it to `True`.
I will try to provide a way to reproduce the error, but it's a private project and I don't know if I have time to isolate the problem any time soon. I spent too much time just identifying it.
- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).
- [x] I've specified the Conan version, operating system version and any tool that can be relevant.
- [] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/client/build/msbuild.py`
Content:
```
1 import re
2
3 from conans import tools
4 from conans.client.build.visual_environment import (VisualStudioBuildEnvironment,
5 vs_build_type_flags, vs_std_cpp)
6 from conans.client.tools.oss import cpu_count
7 from conans.client.tools.win import vcvars_command
8 from conans.errors import ConanException
9 from conans.util.env_reader import get_env
10 from conans.util.files import tmp_file
11 from conans.model.conan_file import ConanFile
12
13
14 class MSBuild(object):
15
16 def __init__(self, conanfile):
17 if isinstance(conanfile, ConanFile):
18 self._conanfile = conanfile
19 self._settings = self._conanfile.settings
20 self._output = self._conanfile.output
21 self.build_env = VisualStudioBuildEnvironment(self._conanfile)
22 else: # backwards compatible with build_sln_command
23 self._settings = conanfile
24 self.build_env = None
25
26 def build(self, project_file, targets=None, upgrade_project=True, build_type=None, arch=None,
27 parallel=True, force_vcvars=False, toolset=None, platforms=None):
28 with tools.environment_append(self.build_env.vars):
29 # Path for custom properties file
30 props_file_contents = self._get_props_file_contents()
31 with tmp_file(props_file_contents) as props_file_path:
32 vcvars = vcvars_command(self._conanfile.settings, force=force_vcvars)
33 command = self.get_command(project_file, props_file_path,
34 targets=targets, upgrade_project=upgrade_project,
35 build_type=build_type, arch=arch, parallel=parallel,
36 toolset=toolset, platforms=platforms,
37 use_env=True)
38 command = "%s && %s" % (vcvars, command)
39 return self._conanfile.run(command)
40
41 def get_command(self, project_file, props_file_path=None, targets=None, upgrade_project=True,
42 build_type=None, arch=None, parallel=True, toolset=None, platforms=None,
43 use_env=False):
44
45 targets = targets or []
46 command = ""
47
48 if upgrade_project and not get_env("CONAN_SKIP_VS_PROJECTS_UPGRADE", False):
49 command += "devenv %s /upgrade && " % project_file
50 else:
51 self._output.info("Skipped sln project upgrade")
52
53 build_type = build_type or self._settings.get_safe("build_type")
54 arch = arch or self._settings.get_safe("arch")
55 if not build_type:
56 raise ConanException("Cannot build_sln_command, build_type not defined")
57 if not arch:
58 raise ConanException("Cannot build_sln_command, arch not defined")
59
60
61
62 command += "msbuild %s /p:Configuration=%s" % (project_file, build_type)
63 msvc_arch = {'x86': 'x86',
64 'x86_64': 'x64',
65 'armv7': 'ARM',
66 'armv8': 'ARM64'}
67 if platforms:
68 msvc_arch.update(platforms)
69 msvc_arch = msvc_arch.get(str(arch))
70 try:
71 sln = tools.load(project_file)
72 pattern = re.compile(r"GlobalSection\(SolutionConfigurationPlatforms\)"
73 r"(.*?)EndGlobalSection", re.DOTALL)
74 solution_global = pattern.search(sln).group(1)
75 lines = solution_global.splitlines()
76 lines = [s.split("=")[0].strip() for s in lines]
77 except Exception:
78 pass
79 else:
80 config = "%s|%s" % (build_type, msvc_arch)
81 if config not in "".join(lines):
82 self._output.warn("***** The configuration %s does not exist in this solution *****" % config)
83 self._output.warn("Use 'platforms' argument to define your architectures")
84
85 if use_env:
86 command += ' /p:UseEnv=true'
87
88 if msvc_arch:
89 command += ' /p:Platform="%s"' % msvc_arch
90
91 if parallel:
92 command += ' /m:%s' % cpu_count()
93
94 if targets:
95 command += " /target:%s" % ";".join(targets)
96
97 if toolset:
98 command += " /p:PlatformToolset=%s" % toolset
99
100 if props_file_path:
101 command += ' /p:ForceImportBeforeCppTargets="%s"' % props_file_path
102
103 return command
104
105 def _get_props_file_contents(self):
106 # how to specify runtime in command line:
107 # https://stackoverflow.com/questions/38840332/msbuild-overrides-properties-while-building-vc-project
108 runtime_library = {"MT": "MultiThreaded",
109 "MTd": "MultiThreadedDebug",
110 "MD": "MultiThreadedDLL",
111 "MDd": "MultiThreadedDebugDLL"}.get(self._settings.get_safe("compiler.runtime"), "")
112
113 if self.build_env:
114 # Take the flags from the build env, the user was able to alter them if needed
115 flags = self.build_env.flags
116 flags.append(self.build_env.std)
117 else: # To be removed when build_sln_command is deprecated
118 flags = vs_build_type_flags(self._settings)
119 flags.append(vs_std_cpp(self._settings))
120
121 template = """<?xml version="1.0" encoding="utf-8"?>
122 <Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
123 <ItemDefinitionGroup>
124 <ClCompile>
125 <RuntimeLibrary>{runtime}</RuntimeLibrary>
126 <AdditionalOptions>{compiler_flags} %(AdditionalOptions)</AdditionalOptions>
127 </ClCompile>
128 </ItemDefinitionGroup>
129 </Project>""".format(**{"runtime": runtime_library,
130 "compiler_flags": " ".join([flag for flag in flags if flag])})
131 return template
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conans/client/build/msbuild.py b/conans/client/build/msbuild.py
--- a/conans/client/build/msbuild.py
+++ b/conans/client/build/msbuild.py
@@ -24,7 +24,7 @@
self.build_env = None
def build(self, project_file, targets=None, upgrade_project=True, build_type=None, arch=None,
- parallel=True, force_vcvars=False, toolset=None, platforms=None):
+ parallel=True, force_vcvars=False, toolset=None, platforms=None, use_env=True):
with tools.environment_append(self.build_env.vars):
# Path for custom properties file
props_file_contents = self._get_props_file_contents()
@@ -34,7 +34,7 @@
targets=targets, upgrade_project=upgrade_project,
build_type=build_type, arch=arch, parallel=parallel,
toolset=toolset, platforms=platforms,
- use_env=True)
+ use_env=use_env)
command = "%s && %s" % (vcvars, command)
return self._conanfile.run(command)
@@ -43,10 +43,10 @@
use_env=False):
targets = targets or []
- command = ""
+ command = []
if upgrade_project and not get_env("CONAN_SKIP_VS_PROJECTS_UPGRADE", False):
- command += "devenv %s /upgrade && " % project_file
+ command.append("devenv %s /upgrade &&" % project_file)
else:
self._output.info("Skipped sln project upgrade")
@@ -57,9 +57,7 @@
if not arch:
raise ConanException("Cannot build_sln_command, arch not defined")
-
-
- command += "msbuild %s /p:Configuration=%s" % (project_file, build_type)
+ command.append("msbuild %s /p:Configuration=%s" % (project_file, build_type))
msvc_arch = {'x86': 'x86',
'x86_64': 'x64',
'armv7': 'ARM',
@@ -83,24 +81,24 @@
self._output.warn("Use 'platforms' argument to define your architectures")
if use_env:
- command += ' /p:UseEnv=true'
+ command.append('/p:UseEnv=true')
if msvc_arch:
- command += ' /p:Platform="%s"' % msvc_arch
+ command.append('/p:Platform="%s"' % msvc_arch)
if parallel:
- command += ' /m:%s' % cpu_count()
+ command.append('/m:%s' % cpu_count())
if targets:
- command += " /target:%s" % ";".join(targets)
+ command.append("/target:%s" % ";".join(targets))
if toolset:
- command += " /p:PlatformToolset=%s" % toolset
+ command.append("/p:PlatformToolset=%s" % toolset)
if props_file_path:
- command += ' /p:ForceImportBeforeCppTargets="%s"' % props_file_path
+ command.append('/p:ForceImportBeforeCppTargets="%s"' % props_file_path)
- return command
+ return " ".join(command)
def _get_props_file_contents(self):
# how to specify runtime in command line:
| {"golden_diff": "diff --git a/conans/client/build/msbuild.py b/conans/client/build/msbuild.py\n--- a/conans/client/build/msbuild.py\n+++ b/conans/client/build/msbuild.py\n@@ -24,7 +24,7 @@\n self.build_env = None\n \n def build(self, project_file, targets=None, upgrade_project=True, build_type=None, arch=None,\n- parallel=True, force_vcvars=False, toolset=None, platforms=None):\n+ parallel=True, force_vcvars=False, toolset=None, platforms=None, use_env=True):\n with tools.environment_append(self.build_env.vars):\n # Path for custom properties file\n props_file_contents = self._get_props_file_contents()\n@@ -34,7 +34,7 @@\n targets=targets, upgrade_project=upgrade_project,\n build_type=build_type, arch=arch, parallel=parallel,\n toolset=toolset, platforms=platforms,\n- use_env=True)\n+ use_env=use_env)\n command = \"%s && %s\" % (vcvars, command)\n return self._conanfile.run(command)\n \n@@ -43,10 +43,10 @@\n use_env=False):\n \n targets = targets or []\n- command = \"\"\n+ command = []\n \n if upgrade_project and not get_env(\"CONAN_SKIP_VS_PROJECTS_UPGRADE\", False):\n- command += \"devenv %s /upgrade && \" % project_file\n+ command.append(\"devenv %s /upgrade &&\" % project_file)\n else:\n self._output.info(\"Skipped sln project upgrade\")\n \n@@ -57,9 +57,7 @@\n if not arch:\n raise ConanException(\"Cannot build_sln_command, arch not defined\")\n \n-\n-\n- command += \"msbuild %s /p:Configuration=%s\" % (project_file, build_type)\n+ command.append(\"msbuild %s /p:Configuration=%s\" % (project_file, build_type))\n msvc_arch = {'x86': 'x86',\n 'x86_64': 'x64',\n 'armv7': 'ARM',\n@@ -83,24 +81,24 @@\n self._output.warn(\"Use 'platforms' argument to define your architectures\")\n \n if use_env:\n- command += ' /p:UseEnv=true'\n+ command.append('/p:UseEnv=true')\n \n if msvc_arch:\n- command += ' /p:Platform=\"%s\"' % msvc_arch\n+ command.append('/p:Platform=\"%s\"' % msvc_arch)\n \n if parallel:\n- command += ' /m:%s' % cpu_count()\n+ command.append('/m:%s' % cpu_count())\n \n if targets:\n- command += \" /target:%s\" % \";\".join(targets)\n+ command.append(\"/target:%s\" % \";\".join(targets))\n \n if toolset:\n- command += \" /p:PlatformToolset=%s\" % toolset\n+ command.append(\"/p:PlatformToolset=%s\" % toolset)\n \n if props_file_path:\n- command += ' /p:ForceImportBeforeCppTargets=\"%s\"' % props_file_path\n+ command.append('/p:ForceImportBeforeCppTargets=\"%s\"' % props_file_path)\n \n- return command\n+ return \" \".join(command)\n \n def _get_props_file_contents(self):\n # how to specify runtime in command line:\n", "issue": "MSBuild helper useEnv option causes missing include paths in some casess\nIn using MSBuild helper for the first time, I received a number of errors about headers that could not be found. After spending significant time validating that the project works normally when running msbuild manually, I determined that the breaking flag was `/useEnv:True`. \r\n\r\nI briefly looked at the implementation and it was not immediately obvious to me where the problem might be. I have worked around it by using the `get_command` method from the MSBuild helper, which returns a command where the default of `useEnv` is `False`. 
\r\n\r\nIt's mysterious why it causes this problem, but if everything is correct, then the requested change would be to simply expose `useEnv` as a parameter in the `build` method of the `MSBuild` helper rather than forcing it to `True`.\r\n\r\nI will try to provide a way to reproduce the error, but it's a private project and I don't know if I have time to isolate the problem any time soon. I spent to much time just identifying it. \r\n\r\n- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).\r\n- [x] I've specified the Conan version, operating system version and any tool that can be relevant.\r\n- [] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.\r\n\r\n\n", "before_files": [{"content": "import re\n\nfrom conans import tools\nfrom conans.client.build.visual_environment import (VisualStudioBuildEnvironment,\n vs_build_type_flags, vs_std_cpp)\nfrom conans.client.tools.oss import cpu_count\nfrom conans.client.tools.win import vcvars_command\nfrom conans.errors import ConanException\nfrom conans.util.env_reader import get_env\nfrom conans.util.files import tmp_file\nfrom conans.model.conan_file import ConanFile\n\n\nclass MSBuild(object):\n\n def __init__(self, conanfile):\n if isinstance(conanfile, ConanFile):\n self._conanfile = conanfile\n self._settings = self._conanfile.settings\n self._output = self._conanfile.output\n self.build_env = VisualStudioBuildEnvironment(self._conanfile)\n else: # backwards compatible with build_sln_command\n self._settings = conanfile\n self.build_env = None\n\n def build(self, project_file, targets=None, upgrade_project=True, build_type=None, arch=None,\n parallel=True, force_vcvars=False, toolset=None, platforms=None):\n with tools.environment_append(self.build_env.vars):\n # Path for custom properties file\n props_file_contents = self._get_props_file_contents()\n with tmp_file(props_file_contents) as props_file_path:\n vcvars = vcvars_command(self._conanfile.settings, force=force_vcvars)\n command = self.get_command(project_file, props_file_path,\n targets=targets, upgrade_project=upgrade_project,\n build_type=build_type, arch=arch, parallel=parallel,\n toolset=toolset, platforms=platforms,\n use_env=True)\n command = \"%s && %s\" % (vcvars, command)\n return self._conanfile.run(command)\n\n def get_command(self, project_file, props_file_path=None, targets=None, upgrade_project=True,\n build_type=None, arch=None, parallel=True, toolset=None, platforms=None,\n use_env=False):\n\n targets = targets or []\n command = \"\"\n\n if upgrade_project and not get_env(\"CONAN_SKIP_VS_PROJECTS_UPGRADE\", False):\n command += \"devenv %s /upgrade && \" % project_file\n else:\n self._output.info(\"Skipped sln project upgrade\")\n\n build_type = build_type or self._settings.get_safe(\"build_type\")\n arch = arch or self._settings.get_safe(\"arch\")\n if not build_type:\n raise ConanException(\"Cannot build_sln_command, build_type not defined\")\n if not arch:\n raise ConanException(\"Cannot build_sln_command, arch not defined\")\n\n\n\n command += \"msbuild %s /p:Configuration=%s\" % (project_file, build_type)\n msvc_arch = {'x86': 'x86',\n 'x86_64': 'x64',\n 'armv7': 'ARM',\n 'armv8': 'ARM64'}\n if platforms:\n msvc_arch.update(platforms)\n msvc_arch = msvc_arch.get(str(arch))\n try:\n sln = tools.load(project_file)\n pattern = re.compile(r\"GlobalSection\\(SolutionConfigurationPlatforms\\)\"\n r\"(.*?)EndGlobalSection\", re.DOTALL)\n 
solution_global = pattern.search(sln).group(1)\n lines = solution_global.splitlines()\n lines = [s.split(\"=\")[0].strip() for s in lines]\n except Exception:\n pass\n else:\n config = \"%s|%s\" % (build_type, msvc_arch)\n if config not in \"\".join(lines):\n self._output.warn(\"***** The configuration %s does not exist in this solution *****\" % config)\n self._output.warn(\"Use 'platforms' argument to define your architectures\")\n\n if use_env:\n command += ' /p:UseEnv=true'\n\n if msvc_arch:\n command += ' /p:Platform=\"%s\"' % msvc_arch\n\n if parallel:\n command += ' /m:%s' % cpu_count()\n\n if targets:\n command += \" /target:%s\" % \";\".join(targets)\n\n if toolset:\n command += \" /p:PlatformToolset=%s\" % toolset\n\n if props_file_path:\n command += ' /p:ForceImportBeforeCppTargets=\"%s\"' % props_file_path\n\n return command\n\n def _get_props_file_contents(self):\n # how to specify runtime in command line:\n # https://stackoverflow.com/questions/38840332/msbuild-overrides-properties-while-building-vc-project\n runtime_library = {\"MT\": \"MultiThreaded\",\n \"MTd\": \"MultiThreadedDebug\",\n \"MD\": \"MultiThreadedDLL\",\n \"MDd\": \"MultiThreadedDebugDLL\"}.get(self._settings.get_safe(\"compiler.runtime\"), \"\")\n\n if self.build_env:\n # Take the flags from the build env, the user was able to alter them if needed\n flags = self.build_env.flags\n flags.append(self.build_env.std)\n else: # To be removed when build_sln_command is deprecated\n flags = vs_build_type_flags(self._settings)\n flags.append(vs_std_cpp(self._settings))\n\n template = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <Project xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\">\n <ItemDefinitionGroup>\n <ClCompile>\n <RuntimeLibrary>{runtime}</RuntimeLibrary>\n <AdditionalOptions>{compiler_flags} %(AdditionalOptions)</AdditionalOptions>\n </ClCompile>\n </ItemDefinitionGroup>\n </Project>\"\"\".format(**{\"runtime\": runtime_library,\n \"compiler_flags\": \" \".join([flag for flag in flags if flag])})\n return template\n", "path": "conans/client/build/msbuild.py"}], "after_files": [{"content": "import re\n\nfrom conans import tools\nfrom conans.client.build.visual_environment import (VisualStudioBuildEnvironment,\n vs_build_type_flags, vs_std_cpp)\nfrom conans.client.tools.oss import cpu_count\nfrom conans.client.tools.win import vcvars_command\nfrom conans.errors import ConanException\nfrom conans.util.env_reader import get_env\nfrom conans.util.files import tmp_file\nfrom conans.model.conan_file import ConanFile\n\n\nclass MSBuild(object):\n\n def __init__(self, conanfile):\n if isinstance(conanfile, ConanFile):\n self._conanfile = conanfile\n self._settings = self._conanfile.settings\n self._output = self._conanfile.output\n self.build_env = VisualStudioBuildEnvironment(self._conanfile)\n else: # backwards compatible with build_sln_command\n self._settings = conanfile\n self.build_env = None\n\n def build(self, project_file, targets=None, upgrade_project=True, build_type=None, arch=None,\n parallel=True, force_vcvars=False, toolset=None, platforms=None, use_env=True):\n with tools.environment_append(self.build_env.vars):\n # Path for custom properties file\n props_file_contents = self._get_props_file_contents()\n with tmp_file(props_file_contents) as props_file_path:\n vcvars = vcvars_command(self._conanfile.settings, force=force_vcvars)\n command = self.get_command(project_file, props_file_path,\n targets=targets, upgrade_project=upgrade_project,\n build_type=build_type, arch=arch, 
parallel=parallel,\n toolset=toolset, platforms=platforms,\n use_env=use_env)\n command = \"%s && %s\" % (vcvars, command)\n return self._conanfile.run(command)\n\n def get_command(self, project_file, props_file_path=None, targets=None, upgrade_project=True,\n build_type=None, arch=None, parallel=True, toolset=None, platforms=None,\n use_env=False):\n\n targets = targets or []\n command = []\n\n if upgrade_project and not get_env(\"CONAN_SKIP_VS_PROJECTS_UPGRADE\", False):\n command.append(\"devenv %s /upgrade &&\" % project_file)\n else:\n self._output.info(\"Skipped sln project upgrade\")\n\n build_type = build_type or self._settings.get_safe(\"build_type\")\n arch = arch or self._settings.get_safe(\"arch\")\n if not build_type:\n raise ConanException(\"Cannot build_sln_command, build_type not defined\")\n if not arch:\n raise ConanException(\"Cannot build_sln_command, arch not defined\")\n\n command.append(\"msbuild %s /p:Configuration=%s\" % (project_file, build_type))\n msvc_arch = {'x86': 'x86',\n 'x86_64': 'x64',\n 'armv7': 'ARM',\n 'armv8': 'ARM64'}\n if platforms:\n msvc_arch.update(platforms)\n msvc_arch = msvc_arch.get(str(arch))\n try:\n sln = tools.load(project_file)\n pattern = re.compile(r\"GlobalSection\\(SolutionConfigurationPlatforms\\)\"\n r\"(.*?)EndGlobalSection\", re.DOTALL)\n solution_global = pattern.search(sln).group(1)\n lines = solution_global.splitlines()\n lines = [s.split(\"=\")[0].strip() for s in lines]\n except Exception:\n pass\n else:\n config = \"%s|%s\" % (build_type, msvc_arch)\n if config not in \"\".join(lines):\n self._output.warn(\"***** The configuration %s does not exist in this solution *****\" % config)\n self._output.warn(\"Use 'platforms' argument to define your architectures\")\n\n if use_env:\n command.append('/p:UseEnv=true')\n\n if msvc_arch:\n command.append('/p:Platform=\"%s\"' % msvc_arch)\n\n if parallel:\n command.append('/m:%s' % cpu_count())\n\n if targets:\n command.append(\"/target:%s\" % \";\".join(targets))\n\n if toolset:\n command.append(\"/p:PlatformToolset=%s\" % toolset)\n\n if props_file_path:\n command.append('/p:ForceImportBeforeCppTargets=\"%s\"' % props_file_path)\n\n return \" \".join(command)\n\n def _get_props_file_contents(self):\n # how to specify runtime in command line:\n # https://stackoverflow.com/questions/38840332/msbuild-overrides-properties-while-building-vc-project\n runtime_library = {\"MT\": \"MultiThreaded\",\n \"MTd\": \"MultiThreadedDebug\",\n \"MD\": \"MultiThreadedDLL\",\n \"MDd\": \"MultiThreadedDebugDLL\"}.get(self._settings.get_safe(\"compiler.runtime\"), \"\")\n\n if self.build_env:\n # Take the flags from the build env, the user was able to alter them if needed\n flags = self.build_env.flags\n flags.append(self.build_env.std)\n else: # To be removed when build_sln_command is deprecated\n flags = vs_build_type_flags(self._settings)\n flags.append(vs_std_cpp(self._settings))\n\n template = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <Project xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\">\n <ItemDefinitionGroup>\n <ClCompile>\n <RuntimeLibrary>{runtime}</RuntimeLibrary>\n <AdditionalOptions>{compiler_flags} %(AdditionalOptions)</AdditionalOptions>\n </ClCompile>\n </ItemDefinitionGroup>\n </Project>\"\"\".format(**{\"runtime\": runtime_library,\n \"compiler_flags\": \" \".join([flag for flag in flags if flag])})\n return template\n", "path": "conans/client/build/msbuild.py"}]} | 2,117 | 763 |
gh_patches_debug_5620 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-551 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Flask instrumentation example doesn't work
**Describe your environment**
Running on macOS against the latest `master` version of the `opentelemetry-*` packages.
**Steps to reproduce**
Instrument the app as described in the [docs](https://open-telemetry.github.io/opentelemetry-python/ext/flask/flask.html) and [docstring](https://github.com/open-telemetry/opentelemetry-python/blob/master/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py) -
```python
from flask import Flask
from opentelemetry.ext.flask import instrument_app
app = Flask(__name__)
instrument_app(app)
```
**What is the expected behavior?**
I would expect the flask app to start without any errors when running it locally.
**What is the actual behavior?**
I get an error running the app -
```
from opentelemetry.ext.flask import instrument_app
E ImportError: cannot import name 'instrument_app'
```
This is consistent with the fact that the `instrument_app` symbol no longer appears to be in [`opentelemetry.ext.flask`](https://github.com/open-telemetry/opentelemetry-python/blob/master/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py).
<hr>
The last time I ran this code was against v0.3 of the packages, so I imagine a lot has changed since then. It would be helpful to document the new correct way to instrument Flask apps, as it isn't currently clear to me from looking at the docs and the code what the correct way to do this is. Thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # Note: This package is not named "flask" because of
16 # https://github.com/PyCQA/pylint/issues/2648
17
18 """
19 This library builds on the OpenTelemetry WSGI middleware to track web requests
20 in Flask applications. In addition to opentelemetry-ext-wsgi, it supports
21 flask-specific features such as:
22
23 * The Flask endpoint name is used as the Span name.
24 * The ``http.route`` Span attribute is set so that one can see which URL rule
25 matched a request.
26
27 Usage
28 -----
29
30 .. code-block:: python
31
32 from flask import Flask
33 from opentelemetry.ext.flask import instrument_app
34
35 app = Flask(__name__)
36 instrument_app(app) # This is where the magic happens. ✨
37
38 @app.route("/")
39 def hello():
40 return "Hello!"
41
42 if __name__ == "__main__":
43 app.run(debug=True)
44
45 API
46 ---
47 """
48
49 import logging
50
51 import flask
52
53 import opentelemetry.ext.wsgi as otel_wsgi
54 from opentelemetry import context, propagators, trace
55 from opentelemetry.auto_instrumentation.instrumentor import BaseInstrumentor
56 from opentelemetry.ext.flask.version import __version__
57 from opentelemetry.util import time_ns
58
59 logger = logging.getLogger(__name__)
60
61 _ENVIRON_STARTTIME_KEY = "opentelemetry-flask.starttime_key"
62 _ENVIRON_SPAN_KEY = "opentelemetry-flask.span_key"
63 _ENVIRON_ACTIVATION_KEY = "opentelemetry-flask.activation_key"
64 _ENVIRON_TOKEN = "opentelemetry-flask.token"
65
66
67 class _InstrumentedFlask(flask.Flask):
68 def __init__(self, *args, **kwargs):
69
70 super().__init__(*args, **kwargs)
71
72 # Single use variable here to avoid recursion issues.
73 wsgi = self.wsgi_app
74
75 def wrapped_app(environ, start_response):
76 # We want to measure the time for route matching, etc.
77 # In theory, we could start the span here and use
78 # update_name later but that API is "highly discouraged" so
79 # we better avoid it.
80 environ[_ENVIRON_STARTTIME_KEY] = time_ns()
81
82 def _start_response(status, response_headers, *args, **kwargs):
83 span = flask.request.environ.get(_ENVIRON_SPAN_KEY)
84 if span:
85 otel_wsgi.add_response_attributes(
86 span, status, response_headers
87 )
88 else:
89 logger.warning(
90 "Flask environ's OpenTelemetry span "
91 "missing at _start_response(%s)",
92 status,
93 )
94
95 return start_response(
96 status, response_headers, *args, **kwargs
97 )
98
99 return wsgi(environ, _start_response)
100
101 self.wsgi_app = wrapped_app
102
103 @self.before_request
104 def _before_flask_request():
105 environ = flask.request.environ
106 span_name = (
107 flask.request.endpoint
108 or otel_wsgi.get_default_span_name(environ)
109 )
110 token = context.attach(
111 propagators.extract(otel_wsgi.get_header_from_environ, environ)
112 )
113
114 tracer = trace.get_tracer(__name__, __version__)
115
116 attributes = otel_wsgi.collect_request_attributes(environ)
117 if flask.request.url_rule:
118 # For 404 that result from no route found, etc, we
119 # don't have a url_rule.
120 attributes["http.route"] = flask.request.url_rule.rule
121 span = tracer.start_span(
122 span_name,
123 kind=trace.SpanKind.SERVER,
124 attributes=attributes,
125 start_time=environ.get(_ENVIRON_STARTTIME_KEY),
126 )
127 activation = tracer.use_span(span, end_on_exit=True)
128 activation.__enter__()
129 environ[_ENVIRON_ACTIVATION_KEY] = activation
130 environ[_ENVIRON_SPAN_KEY] = span
131 environ[_ENVIRON_TOKEN] = token
132
133 @self.teardown_request
134 def _teardown_flask_request(exc):
135 activation = flask.request.environ.get(_ENVIRON_ACTIVATION_KEY)
136 if not activation:
137 logger.warning(
138 "Flask environ's OpenTelemetry activation missing"
139 "at _teardown_flask_request(%s)",
140 exc,
141 )
142 return
143
144 if exc is None:
145 activation.__exit__(None, None, None)
146 else:
147 activation.__exit__(
148 type(exc), exc, getattr(exc, "__traceback__", None)
149 )
150 context.detach(flask.request.environ.get(_ENVIRON_TOKEN))
151
152
153 class FlaskInstrumentor(BaseInstrumentor):
154 """A instrumentor for flask.Flask
155
156 See `BaseInstrumentor`
157 """
158
159 def __init__(self):
160 super().__init__()
161 self._original_flask = None
162
163 def _instrument(self):
164 self._original_flask = flask.Flask
165 flask.Flask = _InstrumentedFlask
166
167 def _uninstrument(self):
168 flask.Flask = self._original_flask
169
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py b/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py
--- a/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py
+++ b/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py
@@ -29,11 +29,11 @@
.. code-block:: python
+ from opentelemetry.ext.flask import FlaskInstrumentor
+ FlaskInstrumentor().instrument() # This needs to be executed before importing Flask
from flask import Flask
- from opentelemetry.ext.flask import instrument_app
app = Flask(__name__)
- instrument_app(app) # This is where the magic happens. ✨
@app.route("/")
def hello():
| {"golden_diff": "diff --git a/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py b/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py\n--- a/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py\n+++ b/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py\n@@ -29,11 +29,11 @@\n \n .. code-block:: python\n \n+ from opentelemetry.ext.flask import FlaskInstrumentor\n+ FlaskInstrumentor().instrument() # This needs to be executed before importing Flask\n from flask import Flask\n- from opentelemetry.ext.flask import instrument_app\n \n app = Flask(__name__)\n- instrument_app(app) # This is where the magic happens. \u2728\n \n @app.route(\"/\")\n def hello():\n", "issue": "Flask instrumentation example doesn't work\n**Describe your environment**\r\nRunning on MacOS against the latest `mater` version of the `opentelemetry-*` packages.\r\n\r\n**Steps to reproduce**\r\nInstrument the app as described in the [docs](https://open-telemetry.github.io/opentelemetry-python/ext/flask/flask.html) and [docstring](https://github.com/open-telemetry/opentelemetry-python/blob/master/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py) -\r\n\r\n```python\r\nfrom flask import Flask\r\nfrom opentelemetry.ext.flask import instrument_app\r\n \r\napp = Flask(__name__)\r\ninstrument_app(app)\r\n````\r\n\r\n**What is the expected behavior?**\r\nI would expect the flask app to start without any errors when running it locally.\r\n\r\n**What is the actual behavior?**\r\nI get an error running the app -\r\n\r\n```\r\n from opentelemetry.ext.flask import instrument_app\r\nE ImportError: cannot import name 'instrument_app'\r\n```\r\n\r\nThis is consistent with the fact that the `instrument_app` symbol no longer appears to be in [`opentelemetry.ext.flask`](https://github.com/open-telemetry/opentelemetry-python/blob/master/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py).\r\n\r\n<hr>\r\nThe last time I ran this code was against v0.3 of the packages, so I imagine a lot has changed since then. It would be helpful to document the new correct way to instrument flask apps as it isn't currently clear to me what the correct way to do this is from looking at the docs and the code. Thanks! \r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Note: This package is not named \"flask\" because of\n# https://github.com/PyCQA/pylint/issues/2648\n\n\"\"\"\nThis library builds on the OpenTelemetry WSGI middleware to track web requests\nin Flask applications. In addition to opentelemetry-ext-wsgi, it supports\nflask-specific features such as:\n\n* The Flask endpoint name is used as the Span name.\n* The ``http.route`` Span attribute is set so that one can see which URL rule\n matched a request.\n\nUsage\n-----\n\n.. 
code-block:: python\n\n from flask import Flask\n from opentelemetry.ext.flask import instrument_app\n\n app = Flask(__name__)\n instrument_app(app) # This is where the magic happens. \u2728\n\n @app.route(\"/\")\n def hello():\n return \"Hello!\"\n\n if __name__ == \"__main__\":\n app.run(debug=True)\n\nAPI\n---\n\"\"\"\n\nimport logging\n\nimport flask\n\nimport opentelemetry.ext.wsgi as otel_wsgi\nfrom opentelemetry import context, propagators, trace\nfrom opentelemetry.auto_instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.ext.flask.version import __version__\nfrom opentelemetry.util import time_ns\n\nlogger = logging.getLogger(__name__)\n\n_ENVIRON_STARTTIME_KEY = \"opentelemetry-flask.starttime_key\"\n_ENVIRON_SPAN_KEY = \"opentelemetry-flask.span_key\"\n_ENVIRON_ACTIVATION_KEY = \"opentelemetry-flask.activation_key\"\n_ENVIRON_TOKEN = \"opentelemetry-flask.token\"\n\n\nclass _InstrumentedFlask(flask.Flask):\n def __init__(self, *args, **kwargs):\n\n super().__init__(*args, **kwargs)\n\n # Single use variable here to avoid recursion issues.\n wsgi = self.wsgi_app\n\n def wrapped_app(environ, start_response):\n # We want to measure the time for route matching, etc.\n # In theory, we could start the span here and use\n # update_name later but that API is \"highly discouraged\" so\n # we better avoid it.\n environ[_ENVIRON_STARTTIME_KEY] = time_ns()\n\n def _start_response(status, response_headers, *args, **kwargs):\n span = flask.request.environ.get(_ENVIRON_SPAN_KEY)\n if span:\n otel_wsgi.add_response_attributes(\n span, status, response_headers\n )\n else:\n logger.warning(\n \"Flask environ's OpenTelemetry span \"\n \"missing at _start_response(%s)\",\n status,\n )\n\n return start_response(\n status, response_headers, *args, **kwargs\n )\n\n return wsgi(environ, _start_response)\n\n self.wsgi_app = wrapped_app\n\n @self.before_request\n def _before_flask_request():\n environ = flask.request.environ\n span_name = (\n flask.request.endpoint\n or otel_wsgi.get_default_span_name(environ)\n )\n token = context.attach(\n propagators.extract(otel_wsgi.get_header_from_environ, environ)\n )\n\n tracer = trace.get_tracer(__name__, __version__)\n\n attributes = otel_wsgi.collect_request_attributes(environ)\n if flask.request.url_rule:\n # For 404 that result from no route found, etc, we\n # don't have a url_rule.\n attributes[\"http.route\"] = flask.request.url_rule.rule\n span = tracer.start_span(\n span_name,\n kind=trace.SpanKind.SERVER,\n attributes=attributes,\n start_time=environ.get(_ENVIRON_STARTTIME_KEY),\n )\n activation = tracer.use_span(span, end_on_exit=True)\n activation.__enter__()\n environ[_ENVIRON_ACTIVATION_KEY] = activation\n environ[_ENVIRON_SPAN_KEY] = span\n environ[_ENVIRON_TOKEN] = token\n\n @self.teardown_request\n def _teardown_flask_request(exc):\n activation = flask.request.environ.get(_ENVIRON_ACTIVATION_KEY)\n if not activation:\n logger.warning(\n \"Flask environ's OpenTelemetry activation missing\"\n \"at _teardown_flask_request(%s)\",\n exc,\n )\n return\n\n if exc is None:\n activation.__exit__(None, None, None)\n else:\n activation.__exit__(\n type(exc), exc, getattr(exc, \"__traceback__\", None)\n )\n context.detach(flask.request.environ.get(_ENVIRON_TOKEN))\n\n\nclass FlaskInstrumentor(BaseInstrumentor):\n \"\"\"A instrumentor for flask.Flask\n\n See `BaseInstrumentor`\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self._original_flask = None\n\n def _instrument(self):\n self._original_flask = flask.Flask\n flask.Flask = 
_InstrumentedFlask\n\n def _uninstrument(self):\n flask.Flask = self._original_flask\n", "path": "ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Note: This package is not named \"flask\" because of\n# https://github.com/PyCQA/pylint/issues/2648\n\n\"\"\"\nThis library builds on the OpenTelemetry WSGI middleware to track web requests\nin Flask applications. In addition to opentelemetry-ext-wsgi, it supports\nflask-specific features such as:\n\n* The Flask endpoint name is used as the Span name.\n* The ``http.route`` Span attribute is set so that one can see which URL rule\n matched a request.\n\nUsage\n-----\n\n.. code-block:: python\n\n from opentelemetry.ext.flask import FlaskInstrumentor\n FlaskInstrumentor().instrument() # This needs to be executed before importing Flask\n from flask import Flask\n\n app = Flask(__name__)\n\n @app.route(\"/\")\n def hello():\n return \"Hello!\"\n\n if __name__ == \"__main__\":\n app.run(debug=True)\n\nAPI\n---\n\"\"\"\n\nimport logging\n\nimport flask\n\nimport opentelemetry.ext.wsgi as otel_wsgi\nfrom opentelemetry import context, propagators, trace\nfrom opentelemetry.auto_instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.ext.flask.version import __version__\nfrom opentelemetry.util import time_ns\n\nlogger = logging.getLogger(__name__)\n\n_ENVIRON_STARTTIME_KEY = \"opentelemetry-flask.starttime_key\"\n_ENVIRON_SPAN_KEY = \"opentelemetry-flask.span_key\"\n_ENVIRON_ACTIVATION_KEY = \"opentelemetry-flask.activation_key\"\n_ENVIRON_TOKEN = \"opentelemetry-flask.token\"\n\n\nclass _InstrumentedFlask(flask.Flask):\n def __init__(self, *args, **kwargs):\n\n super().__init__(*args, **kwargs)\n\n # Single use variable here to avoid recursion issues.\n wsgi = self.wsgi_app\n\n def wrapped_app(environ, start_response):\n # We want to measure the time for route matching, etc.\n # In theory, we could start the span here and use\n # update_name later but that API is \"highly discouraged\" so\n # we better avoid it.\n environ[_ENVIRON_STARTTIME_KEY] = time_ns()\n\n def _start_response(status, response_headers, *args, **kwargs):\n span = flask.request.environ.get(_ENVIRON_SPAN_KEY)\n if span:\n otel_wsgi.add_response_attributes(\n span, status, response_headers\n )\n else:\n logger.warning(\n \"Flask environ's OpenTelemetry span \"\n \"missing at _start_response(%s)\",\n status,\n )\n\n return start_response(\n status, response_headers, *args, **kwargs\n )\n\n return wsgi(environ, _start_response)\n\n self.wsgi_app = wrapped_app\n\n @self.before_request\n def _before_flask_request():\n environ = flask.request.environ\n span_name = (\n flask.request.endpoint\n or otel_wsgi.get_default_span_name(environ)\n )\n token = context.attach(\n propagators.extract(otel_wsgi.get_header_from_environ, environ)\n )\n\n tracer = trace.get_tracer(__name__, __version__)\n\n attributes = 
otel_wsgi.collect_request_attributes(environ)\n if flask.request.url_rule:\n # For 404 that result from no route found, etc, we\n # don't have a url_rule.\n attributes[\"http.route\"] = flask.request.url_rule.rule\n span = tracer.start_span(\n span_name,\n kind=trace.SpanKind.SERVER,\n attributes=attributes,\n start_time=environ.get(_ENVIRON_STARTTIME_KEY),\n )\n activation = tracer.use_span(span, end_on_exit=True)\n activation.__enter__()\n environ[_ENVIRON_ACTIVATION_KEY] = activation\n environ[_ENVIRON_SPAN_KEY] = span\n environ[_ENVIRON_TOKEN] = token\n\n @self.teardown_request\n def _teardown_flask_request(exc):\n activation = flask.request.environ.get(_ENVIRON_ACTIVATION_KEY)\n if not activation:\n logger.warning(\n \"Flask environ's OpenTelemetry activation missing\"\n \"at _teardown_flask_request(%s)\",\n exc,\n )\n return\n\n if exc is None:\n activation.__exit__(None, None, None)\n else:\n activation.__exit__(\n type(exc), exc, getattr(exc, \"__traceback__\", None)\n )\n context.detach(flask.request.environ.get(_ENVIRON_TOKEN))\n\n\nclass FlaskInstrumentor(BaseInstrumentor):\n \"\"\"A instrumentor for flask.Flask\n\n See `BaseInstrumentor`\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self._original_flask = None\n\n def _instrument(self):\n self._original_flask = flask.Flask\n flask.Flask = _InstrumentedFlask\n\n def _uninstrument(self):\n flask.Flask = self._original_flask\n", "path": "ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py"}]} | 2,213 | 192 |
gh_patches_debug_59676 | rasdani/github-patches | git_diff | mozilla__bugbug-90 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'Is reporter a Mozillian' as a feature
The first implementation will simply check if an email contains "@mozilla.com" or "@mozilla.org".
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bugbug/bug_features.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import re
7
8 import pandas as pd
9 from sklearn.base import BaseEstimator
10 from sklearn.base import TransformerMixin
11
12 from bugbug import bug_snapshot
13
14
15 def field(bug, field):
16 if field in bug and bug[field] != '---':
17 return bug[field]
18
19 return None
20
21
22 class has_str(object):
23 def __call__(self, bug):
24 return field(bug, 'cf_has_str')
25
26
27 class has_regression_range(object):
28 def __call__(self, bug):
29 return field(bug, 'cf_has_regression_range')
30
31
32 class has_crash_signature(object):
33 def __call__(self, bug):
34 return 'cf_crash_signature' in bug and bug['cf_crash_signature'] != ''
35
36
37 class keywords(object):
38 def __init__(self, to_ignore=set()):
39 self.to_ignore = to_ignore
40
41 def __call__(self, bug):
42 keywords = []
43 subkeywords = []
44 for keyword in bug['keywords']:
45 if keyword in self.to_ignore:
46 continue
47
48 keywords.append(keyword)
49
50 if keyword.startswith('sec-'):
51 subkeywords.append('sec-')
52 elif keyword.startswith('csectype-'):
53 subkeywords.append('csectype-')
54 return keywords + subkeywords
55
56
57 class severity(object):
58 def __call__(self, bug):
59 return field(bug, 'severity')
60
61
62 class is_coverity_issue(object):
63 def __call__(self, bug):
64 return re.search('[CID ?[0-9]+]', bug['summary']) is not None or re.search('[CID ?[0-9]+]', bug['whiteboard']) is not None
65
66
67 class has_url(object):
68 def __call__(self, bug):
69 return bug['url'] != ''
70
71
72 class has_w3c_url(object):
73 def __call__(self, bug):
74 return 'w3c' in bug['url']
75
76
77 class has_github_url(object):
78 def __call__(self, bug):
79 return 'github' in bug['url']
80
81
82 class whiteboard(object):
83 def __call__(self, bug):
84 ret = []
85
86 # TODO: Add any [XXX:YYY] that appears in the whiteboard as [XXX: only
87
88 for elem in ['memshrink', '[ux]']:
89 if elem in bug['whiteboard'].lower():
90 ret.append(elem)
91
92 return ret
93
94
95 class patches(object):
96 def __call__(self, bug):
97 return sum(1 for a in bug['attachments'] if a['is_patch'] or a['content_type'] in ['text/x-review-board-request', 'text/x-phabricator-request'])
98
99
100 class landings(object):
101 def __call__(self, bug):
102 return sum(1 for c in bug['comments'] if '://hg.mozilla.org/' in c['text'])
103
104
105 class title(object):
106 def __call__(self, bug):
107 ret = []
108
109 keywords = [
110 'fail',
111 ]
112 for keyword in keywords:
113 if keyword in bug['summary'].lower():
114 ret.append(keyword)
115
116 return ret
117
118
119 class product(object):
120 def __call__(self, bug):
121 return bug['product']
122
123
124 class component(object):
125 def __call__(self, bug):
126 return bug['component']
127
128
129 def cleanup_url(text):
130 text = re.sub(r'http[s]?://(hg.mozilla|searchfox|dxr.mozilla)\S+', '__CODE_REFERENCE_URL__', text)
131 return re.sub(r'http\S+', '__URL__', text)
132
133
134 def cleanup_fileref(text):
135 return re.sub(r'\w+\.py\b|\w+\.json\b|\w+\.js\b|\w+\.jsm\b|\w+\.html\b|\w+\.css\b|\w+\.c\b|\w+\.cpp\b|\w+\.h\b', '__FILE_REFERENCE__', text)
136
137
138 def cleanup_responses(text):
139 return re.sub('>[^\n]+', ' ', text)
140
141
142 def cleanup_hex(text):
143 return re.sub(r'\b0[xX][0-9a-fA-F]+\b', '__HEX_NUMBER__', text)
144
145
146 def cleanup_dll(text):
147 return re.sub(r'\w+\.dll\b', '__DLL_NAME__', text)
148
149
150 def cleanup_synonyms(text):
151 synonyms = [
152 ('safemode', ['safemode', 'safe mode']),
153 ('str', ['str', 'steps to reproduce', 'repro steps']),
154 ('uaf', ['uaf', 'use after free', 'use-after-free']),
155 ('asan', ['asan', 'address sanitizer', 'addresssanitizer']),
156 ('permafailure', ['permafailure', 'permafailing', 'permafail', 'perma failure', 'perma failing', 'perma fail', 'perma-failure', 'perma-failing', 'perma-fail']),
157 ]
158
159 for synonym_group, synonym_list in synonyms:
160 text = re.sub('|'.join(synonym_list), synonym_group, text, flags=re.IGNORECASE)
161
162 return text
163
164
165 def cleanup_crash(text):
166 return re.sub(r'bp-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{6}[0-9]{6}\b', '__CRASH_STATS_LINK__', text)
167
168
169 class BugExtractor(BaseEstimator, TransformerMixin):
170 def __init__(self, feature_extractors, cleanup_functions, rollback=False, rollback_when=None, commit_messages_map=None):
171 self.feature_extractors = feature_extractors
172 self.cleanup_functions = cleanup_functions
173 self.rollback = rollback
174 self.rollback_when = rollback_when
175 self.commit_messages_map = commit_messages_map
176
177 def fit(self, x, y=None):
178 return self
179
180 def transform(self, bugs):
181 results = []
182
183 for bug in bugs:
184 bug_id = bug['id']
185
186 if self.rollback:
187 bug = bug_snapshot.rollback(bug, self.rollback_when)
188
189 data = {}
190
191 for f in self.feature_extractors:
192 res = f(bug)
193
194 if res is None:
195 continue
196
197 if isinstance(res, list):
198 for item in res:
199 data[f.__class__.__name__ + '-' + item] = 'True'
200 continue
201
202 if isinstance(res, bool):
203 res = str(res)
204
205 data[f.__class__.__name__] = res
206
207 # TODO: Try simply using all possible fields instead of extracting features manually.
208
209 for cleanup_function in self.cleanup_functions:
210 bug['summary'] = cleanup_function(bug['summary'])
211 for c in bug['comments']:
212 c['text'] = cleanup_function(c['text'])
213
214 result = {
215 'data': data,
216 'title': bug['summary'],
217 'first_comment': bug['comments'][0]['text'],
218 'comments': ' '.join([c['text'] for c in bug['comments']]),
219 }
220
221 if self.commit_messages_map is not None:
222 result['commits'] = self.commit_messages_map[bug_id] if bug_id in self.commit_messages_map else ''
223
224 results.append(result)
225
226 return pd.DataFrame(results)
227
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bugbug/bug_features.py b/bugbug/bug_features.py
--- a/bugbug/bug_features.py
+++ b/bugbug/bug_features.py
@@ -126,6 +126,11 @@
return bug['component']
+class is_mozillian(object):
+ def __call__(self, bug):
+ return any(bug['creator_detail']['email'].endswith(domain) for domain in ['@mozilla.com', '@mozilla.org'])
+
+
def cleanup_url(text):
text = re.sub(r'http[s]?://(hg.mozilla|searchfox|dxr.mozilla)\S+', '__CODE_REFERENCE_URL__', text)
return re.sub(r'http\S+', '__URL__', text)
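For reference, the `is_mozillian` extractor added by this diff plugs into the same pipeline as the other feature classes shown above. A minimal, hypothetical usage sketch (the `bugs` iterable is a placeholder for Bugzilla bug dicts and is not part of this dataset row):

```python
# Hypothetical sketch: wiring the new extractor into the existing BugExtractor.
from bugbug.bug_features import BugExtractor, cleanup_url, has_str, is_mozillian

extractor = BugExtractor(
    feature_extractors=[has_str(), is_mozillian()],
    cleanup_functions=[cleanup_url],
)
df = extractor.transform(bugs)  # `bugs`: an iterable of Bugzilla bug dicts
```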
| {"golden_diff": "diff --git a/bugbug/bug_features.py b/bugbug/bug_features.py\n--- a/bugbug/bug_features.py\n+++ b/bugbug/bug_features.py\n@@ -126,6 +126,11 @@\n return bug['component']\n \n \n+class is_mozillian(object):\n+ def __call__(self, bug):\n+ return any(bug['creator_detail']['email'].endswith(domain) for domain in ['@mozilla.com', '@mozilla.org'])\n+\n+\n def cleanup_url(text):\n text = re.sub(r'http[s]?://(hg.mozilla|searchfox|dxr.mozilla)\\S+', '__CODE_REFERENCE_URL__', text)\n return re.sub(r'http\\S+', '__URL__', text)\n", "issue": "'Is reporter a Mozillian' as a feature\nThe first implementation will simply check if an email contains \"@mozilla.com\" or \"@mozilla.org\".\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport re\n\nimport pandas as pd\nfrom sklearn.base import BaseEstimator\nfrom sklearn.base import TransformerMixin\n\nfrom bugbug import bug_snapshot\n\n\ndef field(bug, field):\n if field in bug and bug[field] != '---':\n return bug[field]\n\n return None\n\n\nclass has_str(object):\n def __call__(self, bug):\n return field(bug, 'cf_has_str')\n\n\nclass has_regression_range(object):\n def __call__(self, bug):\n return field(bug, 'cf_has_regression_range')\n\n\nclass has_crash_signature(object):\n def __call__(self, bug):\n return 'cf_crash_signature' in bug and bug['cf_crash_signature'] != ''\n\n\nclass keywords(object):\n def __init__(self, to_ignore=set()):\n self.to_ignore = to_ignore\n\n def __call__(self, bug):\n keywords = []\n subkeywords = []\n for keyword in bug['keywords']:\n if keyword in self.to_ignore:\n continue\n\n keywords.append(keyword)\n\n if keyword.startswith('sec-'):\n subkeywords.append('sec-')\n elif keyword.startswith('csectype-'):\n subkeywords.append('csectype-')\n return keywords + subkeywords\n\n\nclass severity(object):\n def __call__(self, bug):\n return field(bug, 'severity')\n\n\nclass is_coverity_issue(object):\n def __call__(self, bug):\n return re.search('[CID ?[0-9]+]', bug['summary']) is not None or re.search('[CID ?[0-9]+]', bug['whiteboard']) is not None\n\n\nclass has_url(object):\n def __call__(self, bug):\n return bug['url'] != ''\n\n\nclass has_w3c_url(object):\n def __call__(self, bug):\n return 'w3c' in bug['url']\n\n\nclass has_github_url(object):\n def __call__(self, bug):\n return 'github' in bug['url']\n\n\nclass whiteboard(object):\n def __call__(self, bug):\n ret = []\n\n # TODO: Add any [XXX:YYY] that appears in the whiteboard as [XXX: only\n\n for elem in ['memshrink', '[ux]']:\n if elem in bug['whiteboard'].lower():\n ret.append(elem)\n\n return ret\n\n\nclass patches(object):\n def __call__(self, bug):\n return sum(1 for a in bug['attachments'] if a['is_patch'] or a['content_type'] in ['text/x-review-board-request', 'text/x-phabricator-request'])\n\n\nclass landings(object):\n def __call__(self, bug):\n return sum(1 for c in bug['comments'] if '://hg.mozilla.org/' in c['text'])\n\n\nclass title(object):\n def __call__(self, bug):\n ret = []\n\n keywords = [\n 'fail',\n ]\n for keyword in keywords:\n if keyword in bug['summary'].lower():\n ret.append(keyword)\n\n return ret\n\n\nclass product(object):\n def __call__(self, bug):\n return bug['product']\n\n\nclass component(object):\n def __call__(self, bug):\n return bug['component']\n\n\ndef cleanup_url(text):\n text = 
re.sub(r'http[s]?://(hg.mozilla|searchfox|dxr.mozilla)\\S+', '__CODE_REFERENCE_URL__', text)\n return re.sub(r'http\\S+', '__URL__', text)\n\n\ndef cleanup_fileref(text):\n return re.sub(r'\\w+\\.py\\b|\\w+\\.json\\b|\\w+\\.js\\b|\\w+\\.jsm\\b|\\w+\\.html\\b|\\w+\\.css\\b|\\w+\\.c\\b|\\w+\\.cpp\\b|\\w+\\.h\\b', '__FILE_REFERENCE__', text)\n\n\ndef cleanup_responses(text):\n return re.sub('>[^\\n]+', ' ', text)\n\n\ndef cleanup_hex(text):\n return re.sub(r'\\b0[xX][0-9a-fA-F]+\\b', '__HEX_NUMBER__', text)\n\n\ndef cleanup_dll(text):\n return re.sub(r'\\w+\\.dll\\b', '__DLL_NAME__', text)\n\n\ndef cleanup_synonyms(text):\n synonyms = [\n ('safemode', ['safemode', 'safe mode']),\n ('str', ['str', 'steps to reproduce', 'repro steps']),\n ('uaf', ['uaf', 'use after free', 'use-after-free']),\n ('asan', ['asan', 'address sanitizer', 'addresssanitizer']),\n ('permafailure', ['permafailure', 'permafailing', 'permafail', 'perma failure', 'perma failing', 'perma fail', 'perma-failure', 'perma-failing', 'perma-fail']),\n ]\n\n for synonym_group, synonym_list in synonyms:\n text = re.sub('|'.join(synonym_list), synonym_group, text, flags=re.IGNORECASE)\n\n return text\n\n\ndef cleanup_crash(text):\n return re.sub(r'bp-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{6}[0-9]{6}\\b', '__CRASH_STATS_LINK__', text)\n\n\nclass BugExtractor(BaseEstimator, TransformerMixin):\n def __init__(self, feature_extractors, cleanup_functions, rollback=False, rollback_when=None, commit_messages_map=None):\n self.feature_extractors = feature_extractors\n self.cleanup_functions = cleanup_functions\n self.rollback = rollback\n self.rollback_when = rollback_when\n self.commit_messages_map = commit_messages_map\n\n def fit(self, x, y=None):\n return self\n\n def transform(self, bugs):\n results = []\n\n for bug in bugs:\n bug_id = bug['id']\n\n if self.rollback:\n bug = bug_snapshot.rollback(bug, self.rollback_when)\n\n data = {}\n\n for f in self.feature_extractors:\n res = f(bug)\n\n if res is None:\n continue\n\n if isinstance(res, list):\n for item in res:\n data[f.__class__.__name__ + '-' + item] = 'True'\n continue\n\n if isinstance(res, bool):\n res = str(res)\n\n data[f.__class__.__name__] = res\n\n # TODO: Try simply using all possible fields instead of extracting features manually.\n\n for cleanup_function in self.cleanup_functions:\n bug['summary'] = cleanup_function(bug['summary'])\n for c in bug['comments']:\n c['text'] = cleanup_function(c['text'])\n\n result = {\n 'data': data,\n 'title': bug['summary'],\n 'first_comment': bug['comments'][0]['text'],\n 'comments': ' '.join([c['text'] for c in bug['comments']]),\n }\n\n if self.commit_messages_map is not None:\n result['commits'] = self.commit_messages_map[bug_id] if bug_id in self.commit_messages_map else ''\n\n results.append(result)\n\n return pd.DataFrame(results)\n", "path": "bugbug/bug_features.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport re\n\nimport pandas as pd\nfrom sklearn.base import BaseEstimator\nfrom sklearn.base import TransformerMixin\n\nfrom bugbug import bug_snapshot\n\n\ndef field(bug, field):\n if field in bug and bug[field] != '---':\n return bug[field]\n\n return None\n\n\nclass has_str(object):\n def __call__(self, bug):\n return field(bug, 'cf_has_str')\n\n\nclass has_regression_range(object):\n def __call__(self, bug):\n return field(bug, 'cf_has_regression_range')\n\n\nclass has_crash_signature(object):\n def __call__(self, bug):\n return 'cf_crash_signature' in bug and bug['cf_crash_signature'] != ''\n\n\nclass keywords(object):\n def __init__(self, to_ignore=set()):\n self.to_ignore = to_ignore\n\n def __call__(self, bug):\n keywords = []\n subkeywords = []\n for keyword in bug['keywords']:\n if keyword in self.to_ignore:\n continue\n\n keywords.append(keyword)\n\n if keyword.startswith('sec-'):\n subkeywords.append('sec-')\n elif keyword.startswith('csectype-'):\n subkeywords.append('csectype-')\n return keywords + subkeywords\n\n\nclass severity(object):\n def __call__(self, bug):\n return field(bug, 'severity')\n\n\nclass is_coverity_issue(object):\n def __call__(self, bug):\n return re.search('[CID ?[0-9]+]', bug['summary']) is not None or re.search('[CID ?[0-9]+]', bug['whiteboard']) is not None\n\n\nclass has_url(object):\n def __call__(self, bug):\n return bug['url'] != ''\n\n\nclass has_w3c_url(object):\n def __call__(self, bug):\n return 'w3c' in bug['url']\n\n\nclass has_github_url(object):\n def __call__(self, bug):\n return 'github' in bug['url']\n\n\nclass whiteboard(object):\n def __call__(self, bug):\n ret = []\n\n # TODO: Add any [XXX:YYY] that appears in the whiteboard as [XXX: only\n\n for elem in ['memshrink', '[ux]']:\n if elem in bug['whiteboard'].lower():\n ret.append(elem)\n\n return ret\n\n\nclass patches(object):\n def __call__(self, bug):\n return sum(1 for a in bug['attachments'] if a['is_patch'] or a['content_type'] in ['text/x-review-board-request', 'text/x-phabricator-request'])\n\n\nclass landings(object):\n def __call__(self, bug):\n return sum(1 for c in bug['comments'] if '://hg.mozilla.org/' in c['text'])\n\n\nclass title(object):\n def __call__(self, bug):\n ret = []\n\n keywords = [\n 'fail',\n ]\n for keyword in keywords:\n if keyword in bug['summary'].lower():\n ret.append(keyword)\n\n return ret\n\n\nclass product(object):\n def __call__(self, bug):\n return bug['product']\n\n\nclass component(object):\n def __call__(self, bug):\n return bug['component']\n\n\nclass is_mozillian(object):\n def __call__(self, bug):\n return any(bug['creator_detail']['email'].endswith(domain) for domain in ['@mozilla.com', '@mozilla.org'])\n\n\ndef cleanup_url(text):\n text = re.sub(r'http[s]?://(hg.mozilla|searchfox|dxr.mozilla)\\S+', '__CODE_REFERENCE_URL__', text)\n return re.sub(r'http\\S+', '__URL__', text)\n\n\ndef cleanup_fileref(text):\n return re.sub(r'\\w+\\.py\\b|\\w+\\.json\\b|\\w+\\.js\\b|\\w+\\.jsm\\b|\\w+\\.html\\b|\\w+\\.css\\b|\\w+\\.c\\b|\\w+\\.cpp\\b|\\w+\\.h\\b', '__FILE_REFERENCE__', text)\n\n\ndef cleanup_responses(text):\n return re.sub('>[^\\n]+', ' ', text)\n\n\ndef cleanup_hex(text):\n return re.sub(r'\\b0[xX][0-9a-fA-F]+\\b', '__HEX_NUMBER__', text)\n\n\ndef cleanup_dll(text):\n return re.sub(r'\\w+\\.dll\\b', '__DLL_NAME__', text)\n\n\ndef cleanup_synonyms(text):\n synonyms = [\n ('safemode', ['safemode', 'safe mode']),\n ('str', 
['str', 'steps to reproduce', 'repro steps']),\n ('uaf', ['uaf', 'use after free', 'use-after-free']),\n ('asan', ['asan', 'address sanitizer', 'addresssanitizer']),\n ('permafailure', ['permafailure', 'permafailing', 'permafail', 'perma failure', 'perma failing', 'perma fail', 'perma-failure', 'perma-failing', 'perma-fail']),\n ]\n\n for synonym_group, synonym_list in synonyms:\n text = re.sub('|'.join(synonym_list), synonym_group, text, flags=re.IGNORECASE)\n\n return text\n\n\ndef cleanup_crash(text):\n return re.sub(r'bp-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{6}[0-9]{6}\\b', '__CRASH_STATS_LINK__', text)\n\n\nclass BugExtractor(BaseEstimator, TransformerMixin):\n def __init__(self, feature_extractors, cleanup_functions, rollback=False, rollback_when=None, commit_messages_map=None):\n self.feature_extractors = feature_extractors\n self.cleanup_functions = cleanup_functions\n self.rollback = rollback\n self.rollback_when = rollback_when\n self.commit_messages_map = commit_messages_map\n\n def fit(self, x, y=None):\n return self\n\n def transform(self, bugs):\n results = []\n\n for bug in bugs:\n bug_id = bug['id']\n\n if self.rollback:\n bug = bug_snapshot.rollback(bug, self.rollback_when)\n\n data = {}\n\n for f in self.feature_extractors:\n res = f(bug)\n\n if res is None:\n continue\n\n if isinstance(res, list):\n for item in res:\n data[f.__class__.__name__ + '-' + item] = 'True'\n continue\n\n if isinstance(res, bool):\n res = str(res)\n\n data[f.__class__.__name__] = res\n\n # TODO: Try simply using all possible fields instead of extracting features manually.\n\n for cleanup_function in self.cleanup_functions:\n bug['summary'] = cleanup_function(bug['summary'])\n for c in bug['comments']:\n c['text'] = cleanup_function(c['text'])\n\n result = {\n 'data': data,\n 'title': bug['summary'],\n 'first_comment': bug['comments'][0]['text'],\n 'comments': ' '.join([c['text'] for c in bug['comments']]),\n }\n\n if self.commit_messages_map is not None:\n result['commits'] = self.commit_messages_map[bug_id] if bug_id in self.commit_messages_map else ''\n\n results.append(result)\n\n return pd.DataFrame(results)\n", "path": "bugbug/bug_features.py"}]} | 2,488 | 158 |
gh_patches_debug_27214 | rasdani/github-patches | git_diff | mdn__kuma-6134 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove contributor notification post account creation
Once a user has successfully signed up, we show a banner similar to the one below, either just below the page header or at the top of the page.

Because of the changes to account roles, this banner no longer makes sense and should be removed.
--- END ISSUE ---
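For context, the banner described above comes from a django-allauth `user_signed_up` signal handler; a simplified sketch of that pattern (message text shortened, not the exact kuma handler, which appears below):

```python
# Simplified sketch of the pattern behind the sign-up banner.
from allauth.account.signals import user_signed_up
from django.contrib import messages
from django.dispatch import receiver


@receiver(user_signed_up)
def on_user_signed_up(sender, request, user, **kwargs):
    # Removing the notification means dropping this messages.success() call
    # while keeping any other post-signup work (e.g. the welcome email).
    messages.success(request, "You have completed the first step of getting started with MDN")
```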
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/users/signal_handlers.py`
Content:
```
1 from allauth.account.signals import email_confirmed, user_signed_up
2 from allauth.socialaccount.signals import social_account_removed
3 from django.contrib import messages
4 from django.core.exceptions import ObjectDoesNotExist
5 from django.db import transaction
6 from django.db.models.signals import post_delete, post_save, pre_delete
7 from django.dispatch import receiver
8 from django.utils.translation import ugettext_lazy as _
9 from waffle import switch_is_active
10
11 from kuma.core.urlresolvers import reverse
12 from kuma.payments.utils import cancel_stripe_customer_subscription
13 from kuma.wiki.jobs import DocumentContributorsJob
14
15 from .models import User, UserBan
16 from .tasks import send_welcome_email
17
18
19 @receiver(user_signed_up, dispatch_uid='users.user_signed_up')
20 def on_user_signed_up(sender, request, user, **kwargs):
21 """
22 Signal handler to be called when a given user has signed up.
23 """
24 url = reverse('wiki.document', args=['MDN/Getting_started'])
25 msg = _('You have completed the first step of '
26 '<a href="%s">getting started with MDN</a>') % url
27 messages.success(request, msg)
28 if switch_is_active('welcome_email'):
29 # only send if the user has already verified
30 # at least one email address
31 if user.emailaddress_set.filter(verified=True).exists():
32 transaction.on_commit(
33 lambda: send_welcome_email.delay(user.pk, request.LANGUAGE_CODE)
34 )
35
36
37 @receiver(email_confirmed, dispatch_uid='users.email_confirmed')
38 def on_email_confirmed(sender, request, email_address, **kwargs):
39 """
40 Signal handler to be called when a given email address was confirmed
41 by a user.
42 """
43 if switch_is_active('welcome_email'):
44 # only send if the user has exactly one verified (the given)
45 # email address, in other words if it was just confirmed
46 user = email_address.user
47 previous_emails = user.emailaddress_set.exclude(pk=email_address.pk)
48 if not previous_emails.exists():
49 transaction.on_commit(
50 lambda: send_welcome_email.delay(user.pk, request.LANGUAGE_CODE)
51 )
52
53
54 @receiver(social_account_removed, dispatch_uid='users.social_account_removed')
55 def on_social_account_removed(sender, request, socialaccount, **kwargs):
56 """
57 Invoked just after a user successfully removed a social account
58
59 We use it to reset the name of the socialaccount provider in
60 the user's session to one that he also has.
61 """
62 user = socialaccount.user
63 try:
64 all_socialaccounts = user.socialaccount_set.all()
65 next_socialaccount = all_socialaccounts[0]
66 request.session['sociallogin_provider'] = next_socialaccount.provider
67 request.session.modified = True
68 except (ObjectDoesNotExist, IndexError):
69 pass
70
71
72 @receiver(post_save, sender=UserBan, dispatch_uid='users.user_ban.save')
73 def on_ban_save(sender, instance, **kwargs):
74 """
75 Signal handler to be called when a given user ban is saved.
76 """
77 user = instance.user
78 user.is_active = not instance.is_active
79 user.save()
80 invalidate_document_contribution(user)
81
82
83 @receiver(post_delete, sender=UserBan, dispatch_uid='users.user_ban.delete')
84 def on_ban_delete(sender, instance, **kwargs):
85 """
86 Signal handler to be called when a user ban is deleted.
87 """
88 user = instance.user
89 user.is_active = True
90 user.save()
91 invalidate_document_contribution(user)
92
93
94 def invalidate_document_contribution(user):
95 """
96 Invalidate the contributor list for Documents the user has edited.
97
98 This will remove them if they have been banned, and add them if they
99 have been unbanned.
100 """
101 revisions = user.created_revisions
102 doc_ids = set(revisions.values_list('document_id', flat=True))
103 job = DocumentContributorsJob()
104 for doc_id in doc_ids:
105 job.invalidate(doc_id)
106
107
108 @receiver(pre_delete, sender=User, dispatch_uid='users.unsubscribe_payments')
109 def unsubscribe_payments_on_user_delete(sender, instance, **kwargs):
110 """Cancel Stripe subscriptions before deleting User."""
111 user = instance
112 if user.stripe_customer_id:
113 # This may raise an exception if the Stripe API call fails.
114 # This will stop User deletion while an admin investigates.
115 cancel_stripe_customer_subscription(user.stripe_customer_id)
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kuma/users/signal_handlers.py b/kuma/users/signal_handlers.py
--- a/kuma/users/signal_handlers.py
+++ b/kuma/users/signal_handlers.py
@@ -1,14 +1,11 @@
from allauth.account.signals import email_confirmed, user_signed_up
from allauth.socialaccount.signals import social_account_removed
-from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from django.db.models.signals import post_delete, post_save, pre_delete
from django.dispatch import receiver
-from django.utils.translation import ugettext_lazy as _
from waffle import switch_is_active
-from kuma.core.urlresolvers import reverse
from kuma.payments.utils import cancel_stripe_customer_subscription
from kuma.wiki.jobs import DocumentContributorsJob
@@ -21,10 +18,6 @@
"""
Signal handler to be called when a given user has signed up.
"""
- url = reverse('wiki.document', args=['MDN/Getting_started'])
- msg = _('You have completed the first step of '
- '<a href="%s">getting started with MDN</a>') % url
- messages.success(request, msg)
if switch_is_active('welcome_email'):
# only send if the user has already verified
# at least one email address
| {"golden_diff": "diff --git a/kuma/users/signal_handlers.py b/kuma/users/signal_handlers.py\n--- a/kuma/users/signal_handlers.py\n+++ b/kuma/users/signal_handlers.py\n@@ -1,14 +1,11 @@\n from allauth.account.signals import email_confirmed, user_signed_up\n from allauth.socialaccount.signals import social_account_removed\n-from django.contrib import messages\n from django.core.exceptions import ObjectDoesNotExist\n from django.db import transaction\n from django.db.models.signals import post_delete, post_save, pre_delete\n from django.dispatch import receiver\n-from django.utils.translation import ugettext_lazy as _\n from waffle import switch_is_active\n \n-from kuma.core.urlresolvers import reverse\n from kuma.payments.utils import cancel_stripe_customer_subscription\n from kuma.wiki.jobs import DocumentContributorsJob\n \n@@ -21,10 +18,6 @@\n \"\"\"\n Signal handler to be called when a given user has signed up.\n \"\"\"\n- url = reverse('wiki.document', args=['MDN/Getting_started'])\n- msg = _('You have completed the first step of '\n- '<a href=\"%s\">getting started with MDN</a>') % url\n- messages.success(request, msg)\n if switch_is_active('welcome_email'):\n # only send if the user has already verified\n # at least one email address\n", "issue": "Remove contributor notification post account creation\nOnce a user has successfully signed up, we show a banner similar to the one below either just below the page header, or generally at the top of the page.\r\n\r\n\r\n\r\n\r\nBecause of the changes to account roles, these no longer makes sense and should be removed.\n", "before_files": [{"content": "from allauth.account.signals import email_confirmed, user_signed_up\nfrom allauth.socialaccount.signals import social_account_removed\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import transaction\nfrom django.db.models.signals import post_delete, post_save, pre_delete\nfrom django.dispatch import receiver\nfrom django.utils.translation import ugettext_lazy as _\nfrom waffle import switch_is_active\n\nfrom kuma.core.urlresolvers import reverse\nfrom kuma.payments.utils import cancel_stripe_customer_subscription\nfrom kuma.wiki.jobs import DocumentContributorsJob\n\nfrom .models import User, UserBan\nfrom .tasks import send_welcome_email\n\n\n@receiver(user_signed_up, dispatch_uid='users.user_signed_up')\ndef on_user_signed_up(sender, request, user, **kwargs):\n \"\"\"\n Signal handler to be called when a given user has signed up.\n \"\"\"\n url = reverse('wiki.document', args=['MDN/Getting_started'])\n msg = _('You have completed the first step of '\n '<a href=\"%s\">getting started with MDN</a>') % url\n messages.success(request, msg)\n if switch_is_active('welcome_email'):\n # only send if the user has already verified\n # at least one email address\n if user.emailaddress_set.filter(verified=True).exists():\n transaction.on_commit(\n lambda: send_welcome_email.delay(user.pk, request.LANGUAGE_CODE)\n )\n\n\n@receiver(email_confirmed, dispatch_uid='users.email_confirmed')\ndef on_email_confirmed(sender, request, email_address, **kwargs):\n \"\"\"\n Signal handler to be called when a given email address was confirmed\n by a user.\n \"\"\"\n if switch_is_active('welcome_email'):\n # only send if the user has exactly one verified (the given)\n # email address, in other words if it was just confirmed\n user = email_address.user\n previous_emails = user.emailaddress_set.exclude(pk=email_address.pk)\n if not previous_emails.exists():\n 
transaction.on_commit(\n lambda: send_welcome_email.delay(user.pk, request.LANGUAGE_CODE)\n )\n\n\n@receiver(social_account_removed, dispatch_uid='users.social_account_removed')\ndef on_social_account_removed(sender, request, socialaccount, **kwargs):\n \"\"\"\n Invoked just after a user successfully removed a social account\n\n We use it to reset the name of the socialaccount provider in\n the user's session to one that he also has.\n \"\"\"\n user = socialaccount.user\n try:\n all_socialaccounts = user.socialaccount_set.all()\n next_socialaccount = all_socialaccounts[0]\n request.session['sociallogin_provider'] = next_socialaccount.provider\n request.session.modified = True\n except (ObjectDoesNotExist, IndexError):\n pass\n\n\n@receiver(post_save, sender=UserBan, dispatch_uid='users.user_ban.save')\ndef on_ban_save(sender, instance, **kwargs):\n \"\"\"\n Signal handler to be called when a given user ban is saved.\n \"\"\"\n user = instance.user\n user.is_active = not instance.is_active\n user.save()\n invalidate_document_contribution(user)\n\n\n@receiver(post_delete, sender=UserBan, dispatch_uid='users.user_ban.delete')\ndef on_ban_delete(sender, instance, **kwargs):\n \"\"\"\n Signal handler to be called when a user ban is deleted.\n \"\"\"\n user = instance.user\n user.is_active = True\n user.save()\n invalidate_document_contribution(user)\n\n\ndef invalidate_document_contribution(user):\n \"\"\"\n Invalidate the contributor list for Documents the user has edited.\n\n This will remove them if they have been banned, and add them if they\n have been unbanned.\n \"\"\"\n revisions = user.created_revisions\n doc_ids = set(revisions.values_list('document_id', flat=True))\n job = DocumentContributorsJob()\n for doc_id in doc_ids:\n job.invalidate(doc_id)\n\n\n@receiver(pre_delete, sender=User, dispatch_uid='users.unsubscribe_payments')\ndef unsubscribe_payments_on_user_delete(sender, instance, **kwargs):\n \"\"\"Cancel Stripe subscriptions before deleting User.\"\"\"\n user = instance\n if user.stripe_customer_id:\n # This may raise an exception if the Stripe API call fails.\n # This will stop User deletion while an admin investigates.\n cancel_stripe_customer_subscription(user.stripe_customer_id)\n", "path": "kuma/users/signal_handlers.py"}], "after_files": [{"content": "from allauth.account.signals import email_confirmed, user_signed_up\nfrom allauth.socialaccount.signals import social_account_removed\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import transaction\nfrom django.db.models.signals import post_delete, post_save, pre_delete\nfrom django.dispatch import receiver\nfrom waffle import switch_is_active\n\nfrom kuma.payments.utils import cancel_stripe_customer_subscription\nfrom kuma.wiki.jobs import DocumentContributorsJob\n\nfrom .models import User, UserBan\nfrom .tasks import send_welcome_email\n\n\n@receiver(user_signed_up, dispatch_uid='users.user_signed_up')\ndef on_user_signed_up(sender, request, user, **kwargs):\n \"\"\"\n Signal handler to be called when a given user has signed up.\n \"\"\"\n if switch_is_active('welcome_email'):\n # only send if the user has already verified\n # at least one email address\n if user.emailaddress_set.filter(verified=True).exists():\n transaction.on_commit(\n lambda: send_welcome_email.delay(user.pk, request.LANGUAGE_CODE)\n )\n\n\n@receiver(email_confirmed, dispatch_uid='users.email_confirmed')\ndef on_email_confirmed(sender, request, email_address, **kwargs):\n \"\"\"\n Signal handler to be called when a given 
email address was confirmed\n by a user.\n \"\"\"\n if switch_is_active('welcome_email'):\n # only send if the user has exactly one verified (the given)\n # email address, in other words if it was just confirmed\n user = email_address.user\n previous_emails = user.emailaddress_set.exclude(pk=email_address.pk)\n if not previous_emails.exists():\n transaction.on_commit(\n lambda: send_welcome_email.delay(user.pk, request.LANGUAGE_CODE)\n )\n\n\n@receiver(social_account_removed, dispatch_uid='users.social_account_removed')\ndef on_social_account_removed(sender, request, socialaccount, **kwargs):\n \"\"\"\n Invoked just after a user successfully removed a social account\n\n We use it to reset the name of the socialaccount provider in\n the user's session to one that he also has.\n \"\"\"\n user = socialaccount.user\n try:\n all_socialaccounts = user.socialaccount_set.all()\n next_socialaccount = all_socialaccounts[0]\n request.session['sociallogin_provider'] = next_socialaccount.provider\n request.session.modified = True\n except (ObjectDoesNotExist, IndexError):\n pass\n\n\n@receiver(post_save, sender=UserBan, dispatch_uid='users.user_ban.save')\ndef on_ban_save(sender, instance, **kwargs):\n \"\"\"\n Signal handler to be called when a given user ban is saved.\n \"\"\"\n user = instance.user\n user.is_active = not instance.is_active\n user.save()\n invalidate_document_contribution(user)\n\n\n@receiver(post_delete, sender=UserBan, dispatch_uid='users.user_ban.delete')\ndef on_ban_delete(sender, instance, **kwargs):\n \"\"\"\n Signal handler to be called when a user ban is deleted.\n \"\"\"\n user = instance.user\n user.is_active = True\n user.save()\n invalidate_document_contribution(user)\n\n\ndef invalidate_document_contribution(user):\n \"\"\"\n Invalidate the contributor list for Documents the user has edited.\n\n This will remove them if they have been banned, and add them if they\n have been unbanned.\n \"\"\"\n revisions = user.created_revisions\n doc_ids = set(revisions.values_list('document_id', flat=True))\n job = DocumentContributorsJob()\n for doc_id in doc_ids:\n job.invalidate(doc_id)\n\n\n@receiver(pre_delete, sender=User, dispatch_uid='users.unsubscribe_payments')\ndef unsubscribe_payments_on_user_delete(sender, instance, **kwargs):\n \"\"\"Cancel Stripe subscriptions before deleting User.\"\"\"\n user = instance\n if user.stripe_customer_id:\n # This may raise an exception if the Stripe API call fails.\n # This will stop User deletion while an admin investigates.\n cancel_stripe_customer_subscription(user.stripe_customer_id)\n", "path": "kuma/users/signal_handlers.py"}]} | 1,577 | 288 |
gh_patches_debug_26405 | rasdani/github-patches | git_diff | xonsh__xonsh-4156 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
abbrevs should only be expanded at the start of the command
## xonfig
<details>
```
+------------------+---------------------+
| xonsh | 0.9.18 |
| Git SHA | 26475146 |
| Commit Date | May 5 06:42:28 2020 |
| Python | 3.8.3 |
| PLY | 3.11 |
| have readline | True |
| prompt toolkit | 3.0.5 |
| shell type | prompt_toolkit |
| pygments | 2.6.1 |
| on posix | True |
| on linux | True |
| distro | manjaro |
| on darwin | False |
| on windows | False |
| on cygwin | False |
| on msys2 | False |
| is superuser | False |
| default encoding | utf-8 |
| xonsh encoding | utf-8 |
| encoding errors | surrogateescape |
+------------------+---------------------+
```
</details>
As of now, abbrevs are expanded in all positions, so they may clash with subcommands. For example, if we have
~~`aliases['ps'] = 'procs'`~~
```
abbrevs['ps'] = 'procs'
```
and then type `docker ps`, it will get expanded to `docker procs`.
So the default behaviour should be to expand only at the start of the command; special handling for `sudo` can be added.
We could also have special symbols to mark an entry as expandable in all positions, e.g. `aliases['*ps*']`, similar to a glob pattern.
--- END ISSUE ---
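One way to get the requested behaviour with the existing xontrib is the callable-abbreviation form documented in `xontrib/abbrevs.py` (shown below); the example is taken from that docstring, and it relies on the callback seeing the buffer before the typed word is removed, which is what the patch later in this entry reorders:

```python
# Expand `ps` only when the buffer starts with it, so `docker ps` is untouched.
abbrevs['ps'] = lambda buffer, word: "procs" if buffer.text.startswith(word) else word
```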
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xontrib/abbrevs.py`
Content:
```
1 """
2 Command abbreviations.
3
4 This expands input words from `abbrevs` disctionary as you type.
5 Adds ``abbrevs`` dictionary to hold user-defined "command abbreviations.
6 The dictionary is searched as you type the matching words are replaced
7 at the command line by the corresponding dictionary contents once you hit
8 'Space' or 'Return' key.
9
10 For instance a frequently used command such as ``git status`` can be abbreviated to ``gst`` as follows::
11
12 $ xontrib load abbrevs
13 $ abbrevs['gst'] = 'git status'
14 $ gst # Once you hit <space> or <return> 'gst' gets expanded to 'git status'.
15
16 one can set a callback function that receives current buffer and word to customize the expanded word based on context
17
18 .. code-block:: python
19
20 $ abbrevs['ps'] = lambda buffer, word: "procs" if buffer.text.startswith(word) else word
21
22
23 It is also possible to set the cursor position after expansion with,
24
25 $ abbrevs['gp'] = "git push <edit> --force"
26 """
27
28 import builtins
29 import typing as tp
30
31 from prompt_toolkit.filters import completion_is_selected, IsMultiline
32 from prompt_toolkit.keys import Keys
33 from xonsh.built_ins import DynamicAccessProxy
34 from xonsh.events import events
35 from xonsh.tools import check_for_partial_string
36
37 __all__ = ()
38
39 builtins.__xonsh__.abbrevs = dict()
40 proxy = DynamicAccessProxy("abbrevs", "__xonsh__.abbrevs")
41 setattr(builtins, "abbrevs", proxy)
42
43
44 class _LastExpanded(tp.NamedTuple):
45 word: str
46 expanded: str
47
48
49 last_expanded: tp.Optional[_LastExpanded] = None
50 EDIT_SYMBOL = "<edit>"
51
52
53 def get_abbreviated(key: str, buffer) -> str:
54 abbrevs = getattr(builtins, "abbrevs", None)
55 abbr = abbrevs[key]
56 if callable(abbr):
57 text = abbr(buffer=buffer, word=key)
58 else:
59 text = abbr
60 return text
61
62
63 def expand_abbrev(buffer) -> bool:
64 """expand the given abbr text. Return true if cursor position changed."""
65 global last_expanded
66 last_expanded = None
67 abbrevs = getattr(builtins, "abbrevs", None)
68 if abbrevs is None:
69 return False
70 document = buffer.document
71 word = document.get_word_before_cursor(WORD=True)
72 if word in abbrevs.keys():
73 partial = document.text[: document.cursor_position]
74 startix, endix, quote = check_for_partial_string(partial)
75 if startix is not None and endix is None:
76 return False
77 buffer.delete_before_cursor(count=len(word))
78 text = get_abbreviated(word, buffer)
79 buffer.insert_text(text)
80 last_expanded = _LastExpanded(word, text)
81 if EDIT_SYMBOL in text:
82 set_cursor_position(buffer, text)
83 return True
84 return False
85
86
87 def revert_abbrev(buffer) -> bool:
88 global last_expanded
89 if last_expanded is None:
90 return False
91 document = buffer.document
92 expansion = last_expanded.expanded + " "
93 if not document.text_before_cursor.endswith(expansion):
94 return False
95 buffer.delete_before_cursor(count=len(expansion))
96 buffer.insert_text(last_expanded.word)
97 last_expanded = None
98 return True
99
100
101 def set_cursor_position(buffer, expanded: str) -> None:
102 pos = expanded.rfind(EDIT_SYMBOL)
103 if pos == -1:
104 return
105 buffer.cursor_position = buffer.cursor_position - (len(expanded) - pos)
106 buffer.delete(len(EDIT_SYMBOL))
107
108
109 @events.on_ptk_create
110 def custom_keybindings(bindings, **kw):
111
112 from xonsh.ptk_shell.key_bindings import carriage_return
113 from prompt_toolkit.filters import EmacsInsertMode, ViInsertMode
114
115 handler = bindings.add
116 insert_mode = ViInsertMode() | EmacsInsertMode()
117
118 @handler(" ", filter=IsMultiline() & insert_mode)
119 def handle_space(event):
120 buffer = event.app.current_buffer
121
122 add_space = True
123 if not revert_abbrev(buffer):
124 position_changed = expand_abbrev(buffer)
125 if position_changed:
126 add_space = False
127 if add_space:
128 buffer.insert_text(" ")
129
130 @handler(
131 Keys.ControlJ, filter=IsMultiline() & insert_mode & ~completion_is_selected
132 )
133 @handler(
134 Keys.ControlM, filter=IsMultiline() & insert_mode & ~completion_is_selected
135 )
136 def multiline_carriage_return(event):
137 buffer = event.app.current_buffer
138 current_char = buffer.document.current_char
139 if not current_char or current_char.isspace():
140 expand_abbrev(buffer)
141 carriage_return(buffer, event.cli)
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/xontrib/abbrevs.py b/xontrib/abbrevs.py
--- a/xontrib/abbrevs.py
+++ b/xontrib/abbrevs.py
@@ -28,6 +28,7 @@
import builtins
import typing as tp
+from prompt_toolkit.buffer import Buffer
from prompt_toolkit.filters import completion_is_selected, IsMultiline
from prompt_toolkit.keys import Keys
from xonsh.built_ins import DynamicAccessProxy
@@ -60,7 +61,7 @@
return text
-def expand_abbrev(buffer) -> bool:
+def expand_abbrev(buffer: Buffer) -> bool:
"""expand the given abbr text. Return true if cursor position changed."""
global last_expanded
last_expanded = None
@@ -74,9 +75,11 @@
startix, endix, quote = check_for_partial_string(partial)
if startix is not None and endix is None:
return False
- buffer.delete_before_cursor(count=len(word))
text = get_abbreviated(word, buffer)
+
+ buffer.delete_before_cursor(count=len(word))
buffer.insert_text(text)
+
last_expanded = _LastExpanded(word, text)
if EDIT_SYMBOL in text:
set_cursor_position(buffer, text)
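The essence of this patch is the ordering: the abbreviation (and any callable) is now resolved while the typed word is still in the buffer, so context-aware callbacks can inspect the full command line. A condensed sketch of the patched flow (not the full function):

```python
# Condensed sketch of the patched expand_abbrev ordering.
def expand_abbrev_sketch(buffer, word, abbrevs):
    abbr = abbrevs[word]
    # Resolve first, so callables still see `word` in buffer.text and the cursor context.
    text = abbr(buffer=buffer, word=word) if callable(abbr) else abbr
    buffer.delete_before_cursor(count=len(word))
    buffer.insert_text(text)
```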
| {"golden_diff": "diff --git a/xontrib/abbrevs.py b/xontrib/abbrevs.py\n--- a/xontrib/abbrevs.py\n+++ b/xontrib/abbrevs.py\n@@ -28,6 +28,7 @@\n import builtins\n import typing as tp\n \n+from prompt_toolkit.buffer import Buffer\n from prompt_toolkit.filters import completion_is_selected, IsMultiline\n from prompt_toolkit.keys import Keys\n from xonsh.built_ins import DynamicAccessProxy\n@@ -60,7 +61,7 @@\n return text\n \n \n-def expand_abbrev(buffer) -> bool:\n+def expand_abbrev(buffer: Buffer) -> bool:\n \"\"\"expand the given abbr text. Return true if cursor position changed.\"\"\"\n global last_expanded\n last_expanded = None\n@@ -74,9 +75,11 @@\n startix, endix, quote = check_for_partial_string(partial)\n if startix is not None and endix is None:\n return False\n- buffer.delete_before_cursor(count=len(word))\n text = get_abbreviated(word, buffer)\n+\n+ buffer.delete_before_cursor(count=len(word))\n buffer.insert_text(text)\n+\n last_expanded = _LastExpanded(word, text)\n if EDIT_SYMBOL in text:\n set_cursor_position(buffer, text)\n", "issue": "abbrevs should only be expanded at the start of the command\n<!--- Provide a general summary of the issue in the Title above -->\r\n<!--- If you have a question along the lines of \"How do I do this Bash command in xonsh\"\r\nplease first look over the Bash to Xonsh translation guide: https://xon.sh/bash_to_xsh.html\r\nIf you don't find an answer there, please do open an issue! -->\r\n\r\n## xonfig\r\n\r\n<details>\r\n\r\n```\r\n+------------------+---------------------+\r\n| xonsh | 0.9.18 |\r\n| Git SHA | 26475146 |\r\n| Commit Date | May 5 06:42:28 2020 |\r\n| Python | 3.8.3 |\r\n| PLY | 3.11 |\r\n| have readline | True |\r\n| prompt toolkit | 3.0.5 |\r\n| shell type | prompt_toolkit |\r\n| pygments | 2.6.1 |\r\n| on posix | True |\r\n| on linux | True |\r\n| distro | manjaro |\r\n| on darwin | False |\r\n| on windows | False |\r\n| on cygwin | False |\r\n| on msys2 | False |\r\n| is superuser | False |\r\n| default encoding | utf-8 |\r\n| xonsh encoding | utf-8 |\r\n| encoding errors | surrogateescape |\r\n+------------------+---------------------+\r\n```\r\n\r\n</details>\r\n\r\n<!--- Tell us what should happen -->\r\nas of abbrevs expands in all places. So it may clash will subcommands. For example, if we have \r\n\r\n~~`aliases['ps'] = 'procs'`~~\r\n```\r\nabbrevs['ps'] = 'procs'\r\n``` \r\n\r\nand do type `docker ps` then it will get expanded to `docker procs`. \r\n\r\nSo the default behaviour should be to expand only at the start of the command. special handling for sudo can be added. \r\nAlso we could have special symbols in aliases to mark it as expandable in all places like `aliases['*ps*']` like the glob pattern. \r\n\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nCommand abbreviations.\n\nThis expands input words from `abbrevs` disctionary as you type.\nAdds ``abbrevs`` dictionary to hold user-defined \"command abbreviations.\nThe dictionary is searched as you type the matching words are replaced\nat the command line by the corresponding dictionary contents once you hit\n'Space' or 'Return' key.\n\nFor instance a frequently used command such as ``git status`` can be abbreviated to ``gst`` as follows::\n\n $ xontrib load abbrevs\n $ abbrevs['gst'] = 'git status'\n $ gst # Once you hit <space> or <return> 'gst' gets expanded to 'git status'.\n\none can set a callback function that receives current buffer and word to customize the expanded word based on context\n\n.. 
code-block:: python\n\n $ abbrevs['ps'] = lambda buffer, word: \"procs\" if buffer.text.startswith(word) else word\n\n\nIt is also possible to set the cursor position after expansion with,\n\n $ abbrevs['gp'] = \"git push <edit> --force\"\n\"\"\"\n\nimport builtins\nimport typing as tp\n\nfrom prompt_toolkit.filters import completion_is_selected, IsMultiline\nfrom prompt_toolkit.keys import Keys\nfrom xonsh.built_ins import DynamicAccessProxy\nfrom xonsh.events import events\nfrom xonsh.tools import check_for_partial_string\n\n__all__ = ()\n\nbuiltins.__xonsh__.abbrevs = dict()\nproxy = DynamicAccessProxy(\"abbrevs\", \"__xonsh__.abbrevs\")\nsetattr(builtins, \"abbrevs\", proxy)\n\n\nclass _LastExpanded(tp.NamedTuple):\n word: str\n expanded: str\n\n\nlast_expanded: tp.Optional[_LastExpanded] = None\nEDIT_SYMBOL = \"<edit>\"\n\n\ndef get_abbreviated(key: str, buffer) -> str:\n abbrevs = getattr(builtins, \"abbrevs\", None)\n abbr = abbrevs[key]\n if callable(abbr):\n text = abbr(buffer=buffer, word=key)\n else:\n text = abbr\n return text\n\n\ndef expand_abbrev(buffer) -> bool:\n \"\"\"expand the given abbr text. Return true if cursor position changed.\"\"\"\n global last_expanded\n last_expanded = None\n abbrevs = getattr(builtins, \"abbrevs\", None)\n if abbrevs is None:\n return False\n document = buffer.document\n word = document.get_word_before_cursor(WORD=True)\n if word in abbrevs.keys():\n partial = document.text[: document.cursor_position]\n startix, endix, quote = check_for_partial_string(partial)\n if startix is not None and endix is None:\n return False\n buffer.delete_before_cursor(count=len(word))\n text = get_abbreviated(word, buffer)\n buffer.insert_text(text)\n last_expanded = _LastExpanded(word, text)\n if EDIT_SYMBOL in text:\n set_cursor_position(buffer, text)\n return True\n return False\n\n\ndef revert_abbrev(buffer) -> bool:\n global last_expanded\n if last_expanded is None:\n return False\n document = buffer.document\n expansion = last_expanded.expanded + \" \"\n if not document.text_before_cursor.endswith(expansion):\n return False\n buffer.delete_before_cursor(count=len(expansion))\n buffer.insert_text(last_expanded.word)\n last_expanded = None\n return True\n\n\ndef set_cursor_position(buffer, expanded: str) -> None:\n pos = expanded.rfind(EDIT_SYMBOL)\n if pos == -1:\n return\n buffer.cursor_position = buffer.cursor_position - (len(expanded) - pos)\n buffer.delete(len(EDIT_SYMBOL))\n\n\[email protected]_ptk_create\ndef custom_keybindings(bindings, **kw):\n\n from xonsh.ptk_shell.key_bindings import carriage_return\n from prompt_toolkit.filters import EmacsInsertMode, ViInsertMode\n\n handler = bindings.add\n insert_mode = ViInsertMode() | EmacsInsertMode()\n\n @handler(\" \", filter=IsMultiline() & insert_mode)\n def handle_space(event):\n buffer = event.app.current_buffer\n\n add_space = True\n if not revert_abbrev(buffer):\n position_changed = expand_abbrev(buffer)\n if position_changed:\n add_space = False\n if add_space:\n buffer.insert_text(\" \")\n\n @handler(\n Keys.ControlJ, filter=IsMultiline() & insert_mode & ~completion_is_selected\n )\n @handler(\n Keys.ControlM, filter=IsMultiline() & insert_mode & ~completion_is_selected\n )\n def multiline_carriage_return(event):\n buffer = event.app.current_buffer\n current_char = buffer.document.current_char\n if not current_char or current_char.isspace():\n expand_abbrev(buffer)\n carriage_return(buffer, event.cli)\n", "path": "xontrib/abbrevs.py"}], "after_files": [{"content": "\"\"\"\nCommand 
abbreviations.\n\nThis expands input words from `abbrevs` disctionary as you type.\nAdds ``abbrevs`` dictionary to hold user-defined \"command abbreviations.\nThe dictionary is searched as you type the matching words are replaced\nat the command line by the corresponding dictionary contents once you hit\n'Space' or 'Return' key.\n\nFor instance a frequently used command such as ``git status`` can be abbreviated to ``gst`` as follows::\n\n $ xontrib load abbrevs\n $ abbrevs['gst'] = 'git status'\n $ gst # Once you hit <space> or <return> 'gst' gets expanded to 'git status'.\n\none can set a callback function that receives current buffer and word to customize the expanded word based on context\n\n.. code-block:: python\n\n $ abbrevs['ps'] = lambda buffer, word: \"procs\" if buffer.text.startswith(word) else word\n\n\nIt is also possible to set the cursor position after expansion with,\n\n $ abbrevs['gp'] = \"git push <edit> --force\"\n\"\"\"\n\nimport builtins\nimport typing as tp\n\nfrom prompt_toolkit.buffer import Buffer\nfrom prompt_toolkit.filters import completion_is_selected, IsMultiline\nfrom prompt_toolkit.keys import Keys\nfrom xonsh.built_ins import DynamicAccessProxy\nfrom xonsh.events import events\nfrom xonsh.tools import check_for_partial_string\n\n__all__ = ()\n\nbuiltins.__xonsh__.abbrevs = dict()\nproxy = DynamicAccessProxy(\"abbrevs\", \"__xonsh__.abbrevs\")\nsetattr(builtins, \"abbrevs\", proxy)\n\n\nclass _LastExpanded(tp.NamedTuple):\n word: str\n expanded: str\n\n\nlast_expanded: tp.Optional[_LastExpanded] = None\nEDIT_SYMBOL = \"<edit>\"\n\n\ndef get_abbreviated(key: str, buffer) -> str:\n abbrevs = getattr(builtins, \"abbrevs\", None)\n abbr = abbrevs[key]\n if callable(abbr):\n text = abbr(buffer=buffer, word=key)\n else:\n text = abbr\n return text\n\n\ndef expand_abbrev(buffer: Buffer) -> bool:\n \"\"\"expand the given abbr text. 
Return true if cursor position changed.\"\"\"\n global last_expanded\n last_expanded = None\n abbrevs = getattr(builtins, \"abbrevs\", None)\n if abbrevs is None:\n return False\n document = buffer.document\n word = document.get_word_before_cursor(WORD=True)\n if word in abbrevs.keys():\n partial = document.text[: document.cursor_position]\n startix, endix, quote = check_for_partial_string(partial)\n if startix is not None and endix is None:\n return False\n text = get_abbreviated(word, buffer)\n\n buffer.delete_before_cursor(count=len(word))\n buffer.insert_text(text)\n\n last_expanded = _LastExpanded(word, text)\n if EDIT_SYMBOL in text:\n set_cursor_position(buffer, text)\n return True\n return False\n\n\ndef revert_abbrev(buffer) -> bool:\n global last_expanded\n if last_expanded is None:\n return False\n document = buffer.document\n expansion = last_expanded.expanded + \" \"\n if not document.text_before_cursor.endswith(expansion):\n return False\n buffer.delete_before_cursor(count=len(expansion))\n buffer.insert_text(last_expanded.word)\n last_expanded = None\n return True\n\n\ndef set_cursor_position(buffer, expanded: str) -> None:\n pos = expanded.rfind(EDIT_SYMBOL)\n if pos == -1:\n return\n buffer.cursor_position = buffer.cursor_position - (len(expanded) - pos)\n buffer.delete(len(EDIT_SYMBOL))\n\n\[email protected]_ptk_create\ndef custom_keybindings(bindings, **kw):\n\n from xonsh.ptk_shell.key_bindings import carriage_return\n from prompt_toolkit.filters import EmacsInsertMode, ViInsertMode\n\n handler = bindings.add\n insert_mode = ViInsertMode() | EmacsInsertMode()\n\n @handler(\" \", filter=IsMultiline() & insert_mode)\n def handle_space(event):\n buffer = event.app.current_buffer\n\n add_space = True\n if not revert_abbrev(buffer):\n position_changed = expand_abbrev(buffer)\n if position_changed:\n add_space = False\n if add_space:\n buffer.insert_text(\" \")\n\n @handler(\n Keys.ControlJ, filter=IsMultiline() & insert_mode & ~completion_is_selected\n )\n @handler(\n Keys.ControlM, filter=IsMultiline() & insert_mode & ~completion_is_selected\n )\n def multiline_carriage_return(event):\n buffer = event.app.current_buffer\n current_char = buffer.document.current_char\n if not current_char or current_char.isspace():\n expand_abbrev(buffer)\n carriage_return(buffer, event.cli)\n", "path": "xontrib/abbrevs.py"}]} | 2,103 | 283 |
gh_patches_debug_35669 | rasdani/github-patches | git_diff | DistrictDataLabs__yellowbrick-679 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
step: removing 5 features in each iteration
Using RFECV, the graph plots the score for feature subsets that grow by one feature in each iteration.
I want each iteration to work with an increment of 5 features.
Example: the score should be plotted against an x-axis of [5, 10, 15, 20, 25].
I used `step=5`, but it's not working.
--- END ISSUE ---
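For reference, the behaviour being asked for is that the cross-validated scores are computed and plotted only at subset sizes that follow the step. A hedged usage sketch (`X` and `y` are placeholders for the user's data, and the estimator choice is arbitrary):

```python
# Hypothetical usage: with step=5 the x-axis should advance 5 features at a time.
from sklearn.linear_model import LogisticRegression
from yellowbrick.features.rfecv import RFECV

viz = RFECV(LogisticRegression(), step=5, cv=3)
viz.fit(X, y)  # expected: scores evaluated at 5, 10, 15, ... features, not 1, 2, 3, ...
```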
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yellowbrick/features/rfecv.py`
Content:
```
1 # yellowbrick.features.rfecv
2 # Visualize the number of features selected with recursive feature elimination
3 #
4 # Author: Benjamin Bengfort <[email protected]>
5 # Created: Tue Apr 03 17:31:37 2018 -0400
6 #
7 # ID: rfecv.py [] [email protected] $
8
9 """
10 Visualize the number of features selected using recursive feature elimination
11 """
12
13 ##########################################################################
14 ## Imports
15 ##########################################################################
16
17 import numpy as np
18
19 from yellowbrick.base import ModelVisualizer
20 from yellowbrick.exceptions import YellowbrickValueError
21
22 from sklearn.utils import check_X_y
23 from sklearn.feature_selection import RFE
24 from sklearn.model_selection import cross_val_score
25
26
27 ##########################################################################
28 ## Recursive Feature Elimination
29 ##########################################################################
30
31 class RFECV(ModelVisualizer):
32 """
33 Recursive Feature Elimination, Cross-Validated (RFECV) feature selection.
34
35 Selects the best subset of features for the supplied estimator by removing
36 0 to N features (where N is the number of features) using recursive
37 feature elimination, then selecting the best subset based on the
38 cross-validation score of the model. Recursive feature elimination
39 eliminates n features from a model by fitting the model multiple times and
40 at each step, removing the weakest features, determined by either the
41 ``coef_`` or ``feature_importances_`` attribute of the fitted model.
42
43 The visualization plots the score relative to each subset and shows trends
44 in feature elimination. If the feature elimination CV score is flat, then
45 potentially there are not enough features in the model. An ideal curve is
46 when the score jumps from low to high as the number of features removed
47 increases, then slowly decreases again from the optimal number of
48 features.
49
50 Parameters
51 ----------
52 model : a scikit-learn estimator
53 An object that implements ``fit`` and provides information about the
54 relative importance of features with either a ``coef_`` or
55 ``feature_importances_`` attribute.
56
57 Note that the object is cloned for each validation.
58
59 ax : matplotlib.Axes object, optional
60 The axes object to plot the figure on.
61
62 step : int or float, optional (default=1)
63 If greater than or equal to 1, then step corresponds to the (integer)
64 number of features to remove at each iteration. If within (0.0, 1.0),
65 then step corresponds to the percentage (rounded down) of features to
66 remove at each iteration.
67
68 groups : array-like, with shape (n_samples,), optional
69 Group labels for the samples used while splitting the dataset into
70 train/test set.
71
72 cv : int, cross-validation generator or an iterable, optional
73 Determines the cross-validation splitting strategy.
74 Possible inputs for cv are:
75
76 - None, to use the default 3-fold cross-validation,
77 - integer, to specify the number of folds.
78 - An object to be used as a cross-validation generator.
79 - An iterable yielding train/test splits.
80
81 see the scikit-learn
82 `cross-validation guide <http://scikit-learn.org/stable/modules/cross_validation.html>`_
83 for more information on the possible strategies that can be used here.
84
85 scoring : string, callable or None, optional, default: None
86 A string or scorer callable object / function with signature
87 ``scorer(estimator, X, y)``. See scikit-learn model evaluation
88 documentation for names of possible metrics.
89
90 kwargs : dict
91 Keyword arguments that are passed to the base class and may influence
92 the visualization as defined in other Visualizers.
93
94 Attributes
95 ----------
96 n_features_ : int
97 The number of features in the selected subset
98
99 support_ : array of shape [n_features]
100 A mask of the selected features
101
102 ranking_ : array of shape [n_features]
103 The feature ranking, such that ``ranking_[i]`` corresponds to the
104 ranked position of feature i. Selected features are assigned rank 1.
105
106 cv_scores_ : array of shape [n_subsets_of_features, n_splits]
107 The cross-validation scores for each subset of features and splits in
108 the cross-validation strategy.
109
110 rfe_estimator_ : sklearn.feature_selection.RFE
111 A fitted RFE estimator wrapping the original estimator. All estimator
112 functions such as ``predict()`` and ``score()`` are passed through to
113 this estimator (it rewraps the original model).
114
115 Notes
116 -----
117 This model wraps ``sklearn.feature_selection.RFE`` and not
118 ``sklearn.feature_selection.RFECV`` because access to the internals of the
119 CV and RFE estimators is required for the visualization. The visualizer
120 does take similar arguments, however it does not expose the same internal
121 attributes.
122
123 Additionally, the RFE model can be accessed via the ``rfe_estimator_``
124 attribute. Once fitted, the visualizer acts as a wrapper for this
125 estimator and not for the original model passed to the model. This way the
126 visualizer model can be used to make predictions.
127
128 .. caution:: This visualizer requires a model that has either a ``coef_``
129 or ``feature_importances_`` attribute when fitted.
130 """
131
132 def __init__(self, model, ax=None, step=1, groups=None, cv=None,
133 scoring=None, **kwargs):
134
135 # Initialize the model visualizer
136 super(RFECV, self).__init__(model, ax=ax, **kwargs)
137
138 # Set parameters
139 self.set_params(step=step, groups=groups, cv=cv, scoring=scoring)
140
141 def fit(self, X, y=None):
142 """
143 Fits the RFECV with the wrapped model to the specified data and draws
144 the rfecv curve with the optimal number of features found.
145
146 Parameters
147 ----------
148 X : array-like, shape (n_samples, n_features)
149 Training vector, where n_samples is the number of samples and
150 n_features is the number of features.
151
152 y : array-like, shape (n_samples) or (n_samples, n_features), optional
153 Target relative to X for classification or regression.
154
155 Returns
156 -------
157 self : instance
158 Returns the instance of the RFECV visualizer.
159 """
160 X, y = check_X_y(X, y, "csr")
161 n_features = X.shape[1]
162
163 # This check is kind of unnecessary since RFE will do it, but it's
164 # nice to get it out of the way ASAP and raise a meaningful error.
165 if 0.0 < self.step < 1.0:
166 step = int(max(1, self.step * n_features))
167 else:
168 step = int(self.step)
169
170 if step < 0:
171 raise YellowbrickValueError("step must be >0")
172
173 # Create the RFE model
174 rfe = RFE(self.estimator, step=step)
175 n_feature_subsets = np.arange(1, n_features+1)
176
177 # Create the cross validation params
178 # TODO: handle random state
179 cv_params = {
180 key: self.get_params()[key]
181 for key in ('groups', 'cv', 'scoring')
182 }
183
184 # Perform cross-validation for each feature subset
185 scores = []
186 for n_features_to_select in n_feature_subsets:
187 rfe.set_params(n_features_to_select=n_features_to_select)
188 scores.append(cross_val_score(rfe, X, y, **cv_params))
189
190 # Convert scores to array
191 self.cv_scores_ = np.array(scores)
192
193 # Find the best RFE model
194 bestidx = self.cv_scores_.mean(axis=1).argmax()
195 self.n_features_ = n_feature_subsets[bestidx]
196
197 # Fit the final RFE model for the number of features
198 self.rfe_estimator_ = rfe
199 self.rfe_estimator_.set_params(n_features_to_select=self.n_features_)
200 self.rfe_estimator_.fit(X, y)
201
202 # Rewrap the visualizer to use the rfe estimator
203 self._wrapped = self.rfe_estimator_
204
205 # Hoist the RFE params to the visualizer
206 self.support_ = self.rfe_estimator_.support_
207 self.ranking_ = self.rfe_estimator_.ranking_
208
209 self.draw()
210 return self
211
212 def draw(self, **kwargs):
213 """
214 Renders the rfecv curve.
215 """
216 # Compute the curves
217 x = np.arange(1, len(self.cv_scores_)+1)
218 means = self.cv_scores_.mean(axis=1)
219 sigmas = self.cv_scores_.std(axis=1)
220
221
222 # Plot one standard deviation above and below the mean
223 self.ax.fill_between(x, means - sigmas, means+sigmas, alpha=0.25)
224
225 # Plot the curve
226 self.ax.plot(x, means, 'o-')
227
228 # Plot the maximum number of features
229 self.ax.axvline(
230 self.n_features_, c='k', ls='--',
231 label="n_features = {}\nscore = {:0.3f}".format(
232 self.n_features_, self.cv_scores_.mean(axis=1).max()
233 )
234 )
235
236 return self.ax
237
238 def finalize(self, **kwargs):
239 """
240 Add the title, legend, and other visual final touches to the plot.
241 """
242 # Set the title of the figure
243 self.set_title('RFECV for {}'.format(self.name))
244
245 # Add the legend
246 self.ax.legend(frameon=True, loc='best')
247
248 # Set the axis labels
249 self.ax.set_xlabel('Number of Features Selected')
250 self.ax.set_ylabel('Score')
251
252
253 ##########################################################################
254 ## Quick Methods
255 ##########################################################################
256
257 def rfecv(model, X, y, ax=None, step=1, groups=None, cv=None,
258 scoring=None, **kwargs):
259 """
260 Performs recursive feature elimination with cross-validation to determine
261 an optimal number of features for a model. Visualizes the feature subsets
262 with respect to the cross-validation score.
263
264 This helper function is a quick wrapper to utilize the RFECV visualizer
265 for one-off analysis.
266
267 Parameters
268 ----------
269 model : a scikit-learn estimator
270 An object that implements ``fit`` and provides information about the
271 relative importance of features with either a ``coef_`` or
272 ``feature_importances_`` attribute.
273
274 Note that the object is cloned for each validation.
275
276 X : array-like, shape (n_samples, n_features)
277 Training vector, where n_samples is the number of samples and
278 n_features is the number of features.
279
280 y : array-like, shape (n_samples) or (n_samples, n_features), optional
281 Target relative to X for classification or regression.
282
283 ax : matplotlib.Axes object, optional
284 The axes object to plot the figure on.
285
286 step : int or float, optional (default=1)
287 If greater than or equal to 1, then step corresponds to the (integer)
288 number of features to remove at each iteration. If within (0.0, 1.0),
289 then step corresponds to the percentage (rounded down) of features to
290 remove at each iteration.
291
292 groups : array-like, with shape (n_samples,), optional
293 Group labels for the samples used while splitting the dataset into
294 train/test set.
295
296 cv : int, cross-validation generator or an iterable, optional
297 Determines the cross-validation splitting strategy.
298 Possible inputs for cv are:
299
300 - None, to use the default 3-fold cross-validation,
301 - integer, to specify the number of folds.
302 - An object to be used as a cross-validation generator.
303 - An iterable yielding train/test splits.
304
305 see the scikit-learn
306 `cross-validation guide <http://scikit-learn.org/stable/modules/cross_validation.html>`_
307 for more information on the possible strategies that can be used here.
308
309 scoring : string, callable or None, optional, default: None
310 A string or scorer callable object / function with signature
311 ``scorer(estimator, X, y)``. See scikit-learn model evaluation
312 documentation for names of possible metrics.
313
314 kwargs : dict
315 Keyword arguments that are passed to the base class and may influence
316 the visualization as defined in other Visualizers. These arguments are
317 also passed to the `poof()` method, e.g. can pass a path to save the
318 figure to.
319
320 Returns
321 -------
322 ax : matplotlib axes
323 Returns the axes that the rfecv were drawn on.
324 """
325 # Initialize the visualizer
326 oz = RFECV(model, ax=ax, step=step, groups=groups, cv=cv, scoring=scoring)
327
328 # Fit and poof the visualizer
329 oz.fit(X, y)
330 oz.poof(**kwargs)
331 return oz.ax
332
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/yellowbrick/features/rfecv.py b/yellowbrick/features/rfecv.py
--- a/yellowbrick/features/rfecv.py
+++ b/yellowbrick/features/rfecv.py
@@ -112,6 +112,10 @@
functions such as ``predict()`` and ``score()`` are passed through to
this estimator (it rewraps the original model).
+ n_feature_subsets_ : array of shape [n_subsets_of_features]
+ The number of features removed on each iteration of RFE, computed by the
+ number of features in the dataset and the step parameter.
+
Notes
-----
This model wraps ``sklearn.feature_selection.RFE`` and not
@@ -172,7 +176,7 @@
# Create the RFE model
rfe = RFE(self.estimator, step=step)
- n_feature_subsets = np.arange(1, n_features+1)
+ self.n_feature_subsets_ = np.arange(1, n_features+step, step)
# Create the cross validation params
# TODO: handle random state
@@ -183,7 +187,7 @@
# Perform cross-validation for each feature subset
scores = []
- for n_features_to_select in n_feature_subsets:
+ for n_features_to_select in self.n_feature_subsets_:
rfe.set_params(n_features_to_select=n_features_to_select)
scores.append(cross_val_score(rfe, X, y, **cv_params))
@@ -192,7 +196,7 @@
# Find the best RFE model
bestidx = self.cv_scores_.mean(axis=1).argmax()
- self.n_features_ = n_feature_subsets[bestidx]
+ self.n_features_ = self.n_feature_subsets_[bestidx]
# Fit the final RFE model for the number of features
self.rfe_estimator_ = rfe
@@ -214,7 +218,7 @@
Renders the rfecv curve.
"""
# Compute the curves
- x = np.arange(1, len(self.cv_scores_)+1)
+ x = self.n_feature_subsets_
means = self.cv_scores_.mean(axis=1)
sigmas = self.cv_scores_.std(axis=1)
| {"golden_diff": "diff --git a/yellowbrick/features/rfecv.py b/yellowbrick/features/rfecv.py\n--- a/yellowbrick/features/rfecv.py\n+++ b/yellowbrick/features/rfecv.py\n@@ -112,6 +112,10 @@\n functions such as ``predict()`` and ``score()`` are passed through to\n this estimator (it rewraps the original model).\n \n+ n_feature_subsets_ : array of shape [n_subsets_of_features]\n+ The number of features removed on each iteration of RFE, computed by the\n+ number of features in the dataset and the step parameter.\n+\n Notes\n -----\n This model wraps ``sklearn.feature_selection.RFE`` and not\n@@ -172,7 +176,7 @@\n \n # Create the RFE model\n rfe = RFE(self.estimator, step=step)\n- n_feature_subsets = np.arange(1, n_features+1)\n+ self.n_feature_subsets_ = np.arange(1, n_features+step, step)\n \n # Create the cross validation params\n # TODO: handle random state\n@@ -183,7 +187,7 @@\n \n # Perform cross-validation for each feature subset\n scores = []\n- for n_features_to_select in n_feature_subsets:\n+ for n_features_to_select in self.n_feature_subsets_:\n rfe.set_params(n_features_to_select=n_features_to_select)\n scores.append(cross_val_score(rfe, X, y, **cv_params))\n \n@@ -192,7 +196,7 @@\n \n # Find the best RFE model\n bestidx = self.cv_scores_.mean(axis=1).argmax()\n- self.n_features_ = n_feature_subsets[bestidx]\n+ self.n_features_ = self.n_feature_subsets_[bestidx]\n \n # Fit the final RFE model for the number of features\n self.rfe_estimator_ = rfe\n@@ -214,7 +218,7 @@\n Renders the rfecv curve.\n \"\"\"\n # Compute the curves\n- x = np.arange(1, len(self.cv_scores_)+1)\n+ x = self.n_feature_subsets_\n means = self.cv_scores_.mean(axis=1)\n sigmas = self.cv_scores_.std(axis=1)\n", "issue": "step: removing 5 features in each iteration\nUsing RFECV, the graph shows best features with one increment in each iteration.\r\nI want to show each iteration should work with an increment of 5 features.\r\nExample: score should be plotted against x-axis of [5,10,15,20,25].\r\nI used step=5, but its not working.\r\n\r\n\n", "before_files": [{"content": "# yellowbrick.features.rfecv\n# Visualize the number of features selected with recursive feature elimination\n#\n# Author: Benjamin Bengfort <[email protected]>\n# Created: Tue Apr 03 17:31:37 2018 -0400\n#\n# ID: rfecv.py [] [email protected] $\n\n\"\"\"\nVisualize the number of features selected using recursive feature elimination\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport numpy as np\n\nfrom yellowbrick.base import ModelVisualizer\nfrom yellowbrick.exceptions import YellowbrickValueError\n\nfrom sklearn.utils import check_X_y\nfrom sklearn.feature_selection import RFE\nfrom sklearn.model_selection import cross_val_score\n\n\n##########################################################################\n## Recursive Feature Elimination\n##########################################################################\n\nclass RFECV(ModelVisualizer):\n \"\"\"\n Recursive Feature Elimination, Cross-Validated (RFECV) feature selection.\n\n Selects the best subset of features for the supplied estimator by removing\n 0 to N features (where N is the number of features) using recursive\n feature elimination, then selecting the best subset based on the\n cross-validation score of the model. 
Recursive feature elimination\n eliminates n features from a model by fitting the model multiple times and\n at each step, removing the weakest features, determined by either the\n ``coef_`` or ``feature_importances_`` attribute of the fitted model.\n\n The visualization plots the score relative to each subset and shows trends\n in feature elimination. If the feature elimination CV score is flat, then\n potentially there are not enough features in the model. An ideal curve is\n when the score jumps from low to high as the number of features removed\n increases, then slowly decreases again from the optimal number of\n features.\n\n Parameters\n ----------\n model : a scikit-learn estimator\n An object that implements ``fit`` and provides information about the\n relative importance of features with either a ``coef_`` or\n ``feature_importances_`` attribute.\n\n Note that the object is cloned for each validation.\n\n ax : matplotlib.Axes object, optional\n The axes object to plot the figure on.\n\n step : int or float, optional (default=1)\n If greater than or equal to 1, then step corresponds to the (integer)\n number of features to remove at each iteration. If within (0.0, 1.0),\n then step corresponds to the percentage (rounded down) of features to\n remove at each iteration.\n\n groups : array-like, with shape (n_samples,), optional\n Group labels for the samples used while splitting the dataset into\n train/test set.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - An object to be used as a cross-validation generator.\n - An iterable yielding train/test splits.\n\n see the scikit-learn\n `cross-validation guide <http://scikit-learn.org/stable/modules/cross_validation.html>`_\n for more information on the possible strategies that can be used here.\n\n scoring : string, callable or None, optional, default: None\n A string or scorer callable object / function with signature\n ``scorer(estimator, X, y)``. See scikit-learn model evaluation\n documentation for names of possible metrics.\n\n kwargs : dict\n Keyword arguments that are passed to the base class and may influence\n the visualization as defined in other Visualizers.\n\n Attributes\n ----------\n n_features_ : int\n The number of features in the selected subset\n\n support_ : array of shape [n_features]\n A mask of the selected features\n\n ranking_ : array of shape [n_features]\n The feature ranking, such that ``ranking_[i]`` corresponds to the\n ranked position of feature i. Selected features are assigned rank 1.\n\n cv_scores_ : array of shape [n_subsets_of_features, n_splits]\n The cross-validation scores for each subset of features and splits in\n the cross-validation strategy.\n\n rfe_estimator_ : sklearn.feature_selection.RFE\n A fitted RFE estimator wrapping the original estimator. All estimator\n functions such as ``predict()`` and ``score()`` are passed through to\n this estimator (it rewraps the original model).\n\n Notes\n -----\n This model wraps ``sklearn.feature_selection.RFE`` and not\n ``sklearn.feature_selection.RFECV`` because access to the internals of the\n CV and RFE estimators is required for the visualization. The visualizer\n does take similar arguments, however it does not expose the same internal\n attributes.\n\n Additionally, the RFE model can be accessed via the ``rfe_estimator_``\n attribute. 
Once fitted, the visualizer acts as a wrapper for this\n estimator and not for the original model passed to the model. This way the\n visualizer model can be used to make predictions.\n\n .. caution:: This visualizer requires a model that has either a ``coef_``\n or ``feature_importances_`` attribute when fitted.\n \"\"\"\n\n def __init__(self, model, ax=None, step=1, groups=None, cv=None,\n scoring=None, **kwargs):\n\n # Initialize the model visualizer\n super(RFECV, self).__init__(model, ax=ax, **kwargs)\n\n # Set parameters\n self.set_params(step=step, groups=groups, cv=cv, scoring=scoring)\n\n def fit(self, X, y=None):\n \"\"\"\n Fits the RFECV with the wrapped model to the specified data and draws\n the rfecv curve with the optimal number of features found.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training vector, where n_samples is the number of samples and\n n_features is the number of features.\n\n y : array-like, shape (n_samples) or (n_samples, n_features), optional\n Target relative to X for classification or regression.\n\n Returns\n -------\n self : instance\n Returns the instance of the RFECV visualizer.\n \"\"\"\n X, y = check_X_y(X, y, \"csr\")\n n_features = X.shape[1]\n\n # This check is kind of unnecessary since RFE will do it, but it's\n # nice to get it out of the way ASAP and raise a meaningful error.\n if 0.0 < self.step < 1.0:\n step = int(max(1, self.step * n_features))\n else:\n step = int(self.step)\n\n if step < 0:\n raise YellowbrickValueError(\"step must be >0\")\n\n # Create the RFE model\n rfe = RFE(self.estimator, step=step)\n n_feature_subsets = np.arange(1, n_features+1)\n\n # Create the cross validation params\n # TODO: handle random state\n cv_params = {\n key: self.get_params()[key]\n for key in ('groups', 'cv', 'scoring')\n }\n\n # Perform cross-validation for each feature subset\n scores = []\n for n_features_to_select in n_feature_subsets:\n rfe.set_params(n_features_to_select=n_features_to_select)\n scores.append(cross_val_score(rfe, X, y, **cv_params))\n\n # Convert scores to array\n self.cv_scores_ = np.array(scores)\n\n # Find the best RFE model\n bestidx = self.cv_scores_.mean(axis=1).argmax()\n self.n_features_ = n_feature_subsets[bestidx]\n\n # Fit the final RFE model for the number of features\n self.rfe_estimator_ = rfe\n self.rfe_estimator_.set_params(n_features_to_select=self.n_features_)\n self.rfe_estimator_.fit(X, y)\n\n # Rewrap the visualizer to use the rfe estimator\n self._wrapped = self.rfe_estimator_\n\n # Hoist the RFE params to the visualizer\n self.support_ = self.rfe_estimator_.support_\n self.ranking_ = self.rfe_estimator_.ranking_\n\n self.draw()\n return self\n\n def draw(self, **kwargs):\n \"\"\"\n Renders the rfecv curve.\n \"\"\"\n # Compute the curves\n x = np.arange(1, len(self.cv_scores_)+1)\n means = self.cv_scores_.mean(axis=1)\n sigmas = self.cv_scores_.std(axis=1)\n\n\n # Plot one standard deviation above and below the mean\n self.ax.fill_between(x, means - sigmas, means+sigmas, alpha=0.25)\n\n # Plot the curve\n self.ax.plot(x, means, 'o-')\n\n # Plot the maximum number of features\n self.ax.axvline(\n self.n_features_, c='k', ls='--',\n label=\"n_features = {}\\nscore = {:0.3f}\".format(\n self.n_features_, self.cv_scores_.mean(axis=1).max()\n )\n )\n\n return self.ax\n\n def finalize(self, **kwargs):\n \"\"\"\n Add the title, legend, and other visual final touches to the plot.\n \"\"\"\n # Set the title of the figure\n self.set_title('RFECV for {}'.format(self.name))\n\n # 
Add the legend\n self.ax.legend(frameon=True, loc='best')\n\n # Set the axis labels\n self.ax.set_xlabel('Number of Features Selected')\n self.ax.set_ylabel('Score')\n\n\n##########################################################################\n## Quick Methods\n##########################################################################\n\ndef rfecv(model, X, y, ax=None, step=1, groups=None, cv=None,\n scoring=None, **kwargs):\n \"\"\"\n Performs recursive feature elimination with cross-validation to determine\n an optimal number of features for a model. Visualizes the feature subsets\n with respect to the cross-validation score.\n\n This helper function is a quick wrapper to utilize the RFECV visualizer\n for one-off analysis.\n\n Parameters\n ----------\n model : a scikit-learn estimator\n An object that implements ``fit`` and provides information about the\n relative importance of features with either a ``coef_`` or\n ``feature_importances_`` attribute.\n\n Note that the object is cloned for each validation.\n\n X : array-like, shape (n_samples, n_features)\n Training vector, where n_samples is the number of samples and\n n_features is the number of features.\n\n y : array-like, shape (n_samples) or (n_samples, n_features), optional\n Target relative to X for classification or regression.\n\n ax : matplotlib.Axes object, optional\n The axes object to plot the figure on.\n\n step : int or float, optional (default=1)\n If greater than or equal to 1, then step corresponds to the (integer)\n number of features to remove at each iteration. If within (0.0, 1.0),\n then step corresponds to the percentage (rounded down) of features to\n remove at each iteration.\n\n groups : array-like, with shape (n_samples,), optional\n Group labels for the samples used while splitting the dataset into\n train/test set.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - An object to be used as a cross-validation generator.\n - An iterable yielding train/test splits.\n\n see the scikit-learn\n `cross-validation guide <http://scikit-learn.org/stable/modules/cross_validation.html>`_\n for more information on the possible strategies that can be used here.\n\n scoring : string, callable or None, optional, default: None\n A string or scorer callable object / function with signature\n ``scorer(estimator, X, y)``. See scikit-learn model evaluation\n documentation for names of possible metrics.\n\n kwargs : dict\n Keyword arguments that are passed to the base class and may influence\n the visualization as defined in other Visualizers. These arguments are\n also passed to the `poof()` method, e.g. 
can pass a path to save the\n figure to.\n\n Returns\n -------\n ax : matplotlib axes\n Returns the axes that the rfecv were drawn on.\n \"\"\"\n # Initialize the visualizer\n oz = RFECV(model, ax=ax, step=step, groups=groups, cv=cv, scoring=scoring)\n\n # Fit and poof the visualizer\n oz.fit(X, y)\n oz.poof(**kwargs)\n return oz.ax\n", "path": "yellowbrick/features/rfecv.py"}], "after_files": [{"content": "# yellowbrick.features.rfecv\n# Visualize the number of features selected with recursive feature elimination\n#\n# Author: Benjamin Bengfort <[email protected]>\n# Created: Tue Apr 03 17:31:37 2018 -0400\n#\n# ID: rfecv.py [] [email protected] $\n\n\"\"\"\nVisualize the number of features selected using recursive feature elimination\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport numpy as np\n\nfrom yellowbrick.base import ModelVisualizer\nfrom yellowbrick.exceptions import YellowbrickValueError\n\nfrom sklearn.utils import check_X_y\nfrom sklearn.feature_selection import RFE\nfrom sklearn.model_selection import cross_val_score\n\n\n##########################################################################\n## Recursive Feature Elimination\n##########################################################################\n\nclass RFECV(ModelVisualizer):\n \"\"\"\n Recursive Feature Elimination, Cross-Validated (RFECV) feature selection.\n\n Selects the best subset of features for the supplied estimator by removing\n 0 to N features (where N is the number of features) using recursive\n feature elimination, then selecting the best subset based on the\n cross-validation score of the model. Recursive feature elimination\n eliminates n features from a model by fitting the model multiple times and\n at each step, removing the weakest features, determined by either the\n ``coef_`` or ``feature_importances_`` attribute of the fitted model.\n\n The visualization plots the score relative to each subset and shows trends\n in feature elimination. If the feature elimination CV score is flat, then\n potentially there are not enough features in the model. An ideal curve is\n when the score jumps from low to high as the number of features removed\n increases, then slowly decreases again from the optimal number of\n features.\n\n Parameters\n ----------\n model : a scikit-learn estimator\n An object that implements ``fit`` and provides information about the\n relative importance of features with either a ``coef_`` or\n ``feature_importances_`` attribute.\n\n Note that the object is cloned for each validation.\n\n ax : matplotlib.Axes object, optional\n The axes object to plot the figure on.\n\n step : int or float, optional (default=1)\n If greater than or equal to 1, then step corresponds to the (integer)\n number of features to remove at each iteration. 
If within (0.0, 1.0),\n then step corresponds to the percentage (rounded down) of features to\n remove at each iteration.\n\n groups : array-like, with shape (n_samples,), optional\n Group labels for the samples used while splitting the dataset into\n train/test set.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - An object to be used as a cross-validation generator.\n - An iterable yielding train/test splits.\n\n see the scikit-learn\n `cross-validation guide <http://scikit-learn.org/stable/modules/cross_validation.html>`_\n for more information on the possible strategies that can be used here.\n\n scoring : string, callable or None, optional, default: None\n A string or scorer callable object / function with signature\n ``scorer(estimator, X, y)``. See scikit-learn model evaluation\n documentation for names of possible metrics.\n\n kwargs : dict\n Keyword arguments that are passed to the base class and may influence\n the visualization as defined in other Visualizers.\n\n Attributes\n ----------\n n_features_ : int\n The number of features in the selected subset\n\n support_ : array of shape [n_features]\n A mask of the selected features\n\n ranking_ : array of shape [n_features]\n The feature ranking, such that ``ranking_[i]`` corresponds to the\n ranked position of feature i. Selected features are assigned rank 1.\n\n cv_scores_ : array of shape [n_subsets_of_features, n_splits]\n The cross-validation scores for each subset of features and splits in\n the cross-validation strategy.\n\n rfe_estimator_ : sklearn.feature_selection.RFE\n A fitted RFE estimator wrapping the original estimator. All estimator\n functions such as ``predict()`` and ``score()`` are passed through to\n this estimator (it rewraps the original model).\n\n n_feature_subsets_ : array of shape [n_subsets_of_features]\n The number of features removed on each iteration of RFE, computed by the\n number of features in the dataset and the step parameter.\n\n Notes\n -----\n This model wraps ``sklearn.feature_selection.RFE`` and not\n ``sklearn.feature_selection.RFECV`` because access to the internals of the\n CV and RFE estimators is required for the visualization. The visualizer\n does take similar arguments, however it does not expose the same internal\n attributes.\n\n Additionally, the RFE model can be accessed via the ``rfe_estimator_``\n attribute. Once fitted, the visualizer acts as a wrapper for this\n estimator and not for the original model passed to the model. This way the\n visualizer model can be used to make predictions.\n\n .. 
caution:: This visualizer requires a model that has either a ``coef_``\n or ``feature_importances_`` attribute when fitted.\n \"\"\"\n\n def __init__(self, model, ax=None, step=1, groups=None, cv=None,\n scoring=None, **kwargs):\n\n # Initialize the model visualizer\n super(RFECV, self).__init__(model, ax=ax, **kwargs)\n\n # Set parameters\n self.set_params(step=step, groups=groups, cv=cv, scoring=scoring)\n\n def fit(self, X, y=None):\n \"\"\"\n Fits the RFECV with the wrapped model to the specified data and draws\n the rfecv curve with the optimal number of features found.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training vector, where n_samples is the number of samples and\n n_features is the number of features.\n\n y : array-like, shape (n_samples) or (n_samples, n_features), optional\n Target relative to X for classification or regression.\n\n Returns\n -------\n self : instance\n Returns the instance of the RFECV visualizer.\n \"\"\"\n X, y = check_X_y(X, y, \"csr\")\n n_features = X.shape[1]\n\n # This check is kind of unnecessary since RFE will do it, but it's\n # nice to get it out of the way ASAP and raise a meaningful error.\n if 0.0 < self.step < 1.0:\n step = int(max(1, self.step * n_features))\n else:\n step = int(self.step)\n\n if step < 0:\n raise YellowbrickValueError(\"step must be >0\")\n\n # Create the RFE model\n rfe = RFE(self.estimator, step=step)\n self.n_feature_subsets_ = np.arange(1, n_features+step, step)\n\n # Create the cross validation params\n # TODO: handle random state\n cv_params = {\n key: self.get_params()[key]\n for key in ('groups', 'cv', 'scoring')\n }\n\n # Perform cross-validation for each feature subset\n scores = []\n for n_features_to_select in self.n_feature_subsets_:\n rfe.set_params(n_features_to_select=n_features_to_select)\n scores.append(cross_val_score(rfe, X, y, **cv_params))\n\n # Convert scores to array\n self.cv_scores_ = np.array(scores)\n\n # Find the best RFE model\n bestidx = self.cv_scores_.mean(axis=1).argmax()\n self.n_features_ = self.n_feature_subsets_[bestidx]\n\n # Fit the final RFE model for the number of features\n self.rfe_estimator_ = rfe\n self.rfe_estimator_.set_params(n_features_to_select=self.n_features_)\n self.rfe_estimator_.fit(X, y)\n\n # Rewrap the visualizer to use the rfe estimator\n self._wrapped = self.rfe_estimator_\n\n # Hoist the RFE params to the visualizer\n self.support_ = self.rfe_estimator_.support_\n self.ranking_ = self.rfe_estimator_.ranking_\n\n self.draw()\n return self\n\n def draw(self, **kwargs):\n \"\"\"\n Renders the rfecv curve.\n \"\"\"\n # Compute the curves\n x = self.n_feature_subsets_\n means = self.cv_scores_.mean(axis=1)\n sigmas = self.cv_scores_.std(axis=1)\n\n\n # Plot one standard deviation above and below the mean\n self.ax.fill_between(x, means - sigmas, means+sigmas, alpha=0.25)\n\n # Plot the curve\n self.ax.plot(x, means, 'o-')\n\n # Plot the maximum number of features\n self.ax.axvline(\n self.n_features_, c='k', ls='--',\n label=\"n_features = {}\\nscore = {:0.3f}\".format(\n self.n_features_, self.cv_scores_.mean(axis=1).max()\n )\n )\n\n return self.ax\n\n def finalize(self, **kwargs):\n \"\"\"\n Add the title, legend, and other visual final touches to the plot.\n \"\"\"\n # Set the title of the figure\n self.set_title('RFECV for {}'.format(self.name))\n\n # Add the legend\n self.ax.legend(frameon=True, loc='best')\n\n # Set the axis labels\n self.ax.set_xlabel('Number of Features Selected')\n 
self.ax.set_ylabel('Score')\n\n\n##########################################################################\n## Quick Methods\n##########################################################################\n\ndef rfecv(model, X, y, ax=None, step=1, groups=None, cv=None,\n scoring=None, **kwargs):\n \"\"\"\n Performs recursive feature elimination with cross-validation to determine\n an optimal number of features for a model. Visualizes the feature subsets\n with respect to the cross-validation score.\n\n This helper function is a quick wrapper to utilize the RFECV visualizer\n for one-off analysis.\n\n Parameters\n ----------\n model : a scikit-learn estimator\n An object that implements ``fit`` and provides information about the\n relative importance of features with either a ``coef_`` or\n ``feature_importances_`` attribute.\n\n Note that the object is cloned for each validation.\n\n X : array-like, shape (n_samples, n_features)\n Training vector, where n_samples is the number of samples and\n n_features is the number of features.\n\n y : array-like, shape (n_samples) or (n_samples, n_features), optional\n Target relative to X for classification or regression.\n\n ax : matplotlib.Axes object, optional\n The axes object to plot the figure on.\n\n step : int or float, optional (default=1)\n If greater than or equal to 1, then step corresponds to the (integer)\n number of features to remove at each iteration. If within (0.0, 1.0),\n then step corresponds to the percentage (rounded down) of features to\n remove at each iteration.\n\n groups : array-like, with shape (n_samples,), optional\n Group labels for the samples used while splitting the dataset into\n train/test set.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - An object to be used as a cross-validation generator.\n - An iterable yielding train/test splits.\n\n see the scikit-learn\n `cross-validation guide <http://scikit-learn.org/stable/modules/cross_validation.html>`_\n for more information on the possible strategies that can be used here.\n\n scoring : string, callable or None, optional, default: None\n A string or scorer callable object / function with signature\n ``scorer(estimator, X, y)``. See scikit-learn model evaluation\n documentation for names of possible metrics.\n\n kwargs : dict\n Keyword arguments that are passed to the base class and may influence\n the visualization as defined in other Visualizers. These arguments are\n also passed to the `poof()` method, e.g. can pass a path to save the\n figure to.\n\n Returns\n -------\n ax : matplotlib axes\n Returns the axes that the rfecv were drawn on.\n \"\"\"\n # Initialize the visualizer\n oz = RFECV(model, ax=ax, step=step, groups=groups, cv=cv, scoring=scoring)\n\n # Fit and poof the visualizer\n oz.fit(X, y)\n oz.poof(**kwargs)\n return oz.ax\n", "path": "yellowbrick/features/rfecv.py"}]} | 4,045 | 520 |
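The merged yellowbrick patch above replaces the fixed `np.arange(1, n_features + 1)` grid with one derived from `step`, so the recorded `cv_scores_` and the plotted x-axis line up with the feature counts actually evaluated. A minimal sketch of the resulting subset grid, assuming illustrative values `n_features=21` and `step=5` (chosen for this example, not taken from the record):

```python
# Sketch only: reproduces the subset grid the patched fit() builds from `step`.
# n_features and step are example values, not part of the original record.
import numpy as np

n_features, step = 21, 5
n_feature_subsets = np.arange(1, n_features + step, step)
print(n_feature_subsets)  # [ 1  6 11 16 21] -> one cross-validated RFE fit per entry
```

Each entry then becomes the `n_features_to_select` passed to `RFE`, which is why the visualizer can honour a step of 5 instead of re-running cross-validation for every single feature count.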
gh_patches_debug_11004 | rasdani/github-patches | git_diff | OpenNMT__OpenNMT-py-1841 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Alpha channel and grayscale in image-to-text with -image_channel_size=3
For training image-to-text, the argument `-image_channel_size=3` implies that the images already have the correct number of channels. However, some of my images are black and white and saved with only one channel, or saved in RGB but with an alpha channel.
I could fix it with a change in `onmt/inputters/image_dataset.py` [here](https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/inputters/image_dataset.py#L78):
from this:
```
if self.channel_size == 1:
img = transforms.ToTensor()(
Image.fromarray(cv2.imread(img_path, 0)))
else:
img = transforms.ToTensor()(Image.open(img_path))
```
to this:
```
if self.channel_size == 1:
img = transforms.ToTensor()(
Image.fromarray(cv2.imread(img_path, 0)))
else:
img = transforms.ToTensor()(
Image.fromarray(cv2.imread(img_path, 1)))
```
The flag in `cv2.imread` with a value of 1 tells cv2 to convert to RGB no matter what the original image is.
Should I do a PR?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `onmt/inputters/image_dataset.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import os
4
5 import torch
6 from torchtext.data import Field
7
8 from onmt.inputters.datareader_base import DataReaderBase
9
10 # domain specific dependencies
11 try:
12 from PIL import Image
13 from torchvision import transforms
14 import cv2
15 except ImportError:
16 Image, transforms, cv2 = None, None, None
17
18
19 class ImageDataReader(DataReaderBase):
20 """Read image data from disk.
21
22 Args:
23 truncate (tuple[int] or NoneType): maximum img size. Use
24 ``(0,0)`` or ``None`` for unlimited.
25 channel_size (int): Number of channels per image.
26
27 Raises:
28 onmt.inputters.datareader_base.MissingDependencyException: If
29 importing any of ``PIL``, ``torchvision``, or ``cv2`` fail.
30 """
31
32 def __init__(self, truncate=None, channel_size=3):
33 self._check_deps()
34 self.truncate = truncate
35 self.channel_size = channel_size
36
37 @classmethod
38 def from_opt(cls, opt):
39 return cls(channel_size=opt.image_channel_size)
40
41 @classmethod
42 def _check_deps(cls):
43 if any([Image is None, transforms is None, cv2 is None]):
44 cls._raise_missing_dep(
45 "PIL", "torchvision", "cv2")
46
47 def read(self, images, side, img_dir=None):
48 """Read data into dicts.
49
50 Args:
51 images (str or Iterable[str]): Sequence of image paths or
52 path to file containing audio paths.
53 In either case, the filenames may be relative to ``src_dir``
54 (default behavior) or absolute.
55 side (str): Prefix used in return dict. Usually
56 ``"src"`` or ``"tgt"``.
57 img_dir (str): Location of source image files. See ``images``.
58
59 Yields:
60 a dictionary containing image data, path and index for each line.
61 """
62 if isinstance(images, str):
63 images = DataReaderBase._read_file(images)
64
65 for i, filename in enumerate(images):
66 filename = filename.decode("utf-8").strip()
67 img_path = os.path.join(img_dir, filename)
68 if not os.path.exists(img_path):
69 img_path = filename
70
71 assert os.path.exists(img_path), \
72 'img path %s not found' % filename
73
74 if self.channel_size == 1:
75 img = transforms.ToTensor()(
76 Image.fromarray(cv2.imread(img_path, 0)))
77 else:
78 img = transforms.ToTensor()(Image.open(img_path))
79 if self.truncate and self.truncate != (0, 0):
80 if not (img.size(1) <= self.truncate[0]
81 and img.size(2) <= self.truncate[1]):
82 continue
83 yield {side: img, side + '_path': filename, 'indices': i}
84
85
86 def img_sort_key(ex):
87 """Sort using the size of the image: (width, height)."""
88 return ex.src.size(2), ex.src.size(1)
89
90
91 def batch_img(data, vocab):
92 """Pad and batch a sequence of images."""
93 c = data[0].size(0)
94 h = max([t.size(1) for t in data])
95 w = max([t.size(2) for t in data])
96 imgs = torch.zeros(len(data), c, h, w).fill_(1)
97 for i, img in enumerate(data):
98 imgs[i, :, 0:img.size(1), 0:img.size(2)] = img
99 return imgs
100
101
102 def image_fields(**kwargs):
103 img = Field(
104 use_vocab=False, dtype=torch.float,
105 postprocessing=batch_img, sequential=False)
106 return img
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/onmt/inputters/image_dataset.py b/onmt/inputters/image_dataset.py
--- a/onmt/inputters/image_dataset.py
+++ b/onmt/inputters/image_dataset.py
@@ -75,7 +75,8 @@
img = transforms.ToTensor()(
Image.fromarray(cv2.imread(img_path, 0)))
else:
- img = transforms.ToTensor()(Image.open(img_path))
+ img = Image.open(img_path).convert('RGB')
+ img = transforms.ToTensor()(img)
if self.truncate and self.truncate != (0, 0):
if not (img.size(1) <= self.truncate[0]
and img.size(2) <= self.truncate[1]):
| {"golden_diff": "diff --git a/onmt/inputters/image_dataset.py b/onmt/inputters/image_dataset.py\n--- a/onmt/inputters/image_dataset.py\n+++ b/onmt/inputters/image_dataset.py\n@@ -75,7 +75,8 @@\n img = transforms.ToTensor()(\n Image.fromarray(cv2.imread(img_path, 0)))\n else:\n- img = transforms.ToTensor()(Image.open(img_path))\n+ img = Image.open(img_path).convert('RGB')\n+ img = transforms.ToTensor()(img)\n if self.truncate and self.truncate != (0, 0):\n if not (img.size(1) <= self.truncate[0]\n and img.size(2) <= self.truncate[1]):\n", "issue": "Alpha channel and grayscale in image-to-text with -image_channel_size=3\nFor training image to text, the argument `-image_channel_size=3` imply that the images already have the good number of channel. However, some of my images are black and white and saved with only one channel or saved in RGB but with the alpha channel.\r\nI could fix it with a change in `onmt/inputters/image_dataset.py` [here](https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/inputters/image_dataset.py#L78):\r\n\r\nfrom this:\r\n```\r\n if self.channel_size == 1:\r\n img = transforms.ToTensor()(\r\n Image.fromarray(cv2.imread(img_path, 0)))\r\n else:\r\n img = transforms.ToTensor()(Image.open(img_path))\r\n```\r\nto this:\r\n```\r\n if self.channel_size == 1:\r\n img = transforms.ToTensor()(\r\n Image.fromarray(cv2.imread(img_path, 0)))\r\n else:\r\n img = transforms.ToTensor()(\r\n Image.fromarray(cv2.imread(img_path, 1)))\r\n```\r\nThe flag in `cv2.imread` with value of 1 tell cv2 to convert to RGB no matter what the original image is.\r\n\r\nShould I do a PR ?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport os\n\nimport torch\nfrom torchtext.data import Field\n\nfrom onmt.inputters.datareader_base import DataReaderBase\n\n# domain specific dependencies\ntry:\n from PIL import Image\n from torchvision import transforms\n import cv2\nexcept ImportError:\n Image, transforms, cv2 = None, None, None\n\n\nclass ImageDataReader(DataReaderBase):\n \"\"\"Read image data from disk.\n\n Args:\n truncate (tuple[int] or NoneType): maximum img size. Use\n ``(0,0)`` or ``None`` for unlimited.\n channel_size (int): Number of channels per image.\n\n Raises:\n onmt.inputters.datareader_base.MissingDependencyException: If\n importing any of ``PIL``, ``torchvision``, or ``cv2`` fail.\n \"\"\"\n\n def __init__(self, truncate=None, channel_size=3):\n self._check_deps()\n self.truncate = truncate\n self.channel_size = channel_size\n\n @classmethod\n def from_opt(cls, opt):\n return cls(channel_size=opt.image_channel_size)\n\n @classmethod\n def _check_deps(cls):\n if any([Image is None, transforms is None, cv2 is None]):\n cls._raise_missing_dep(\n \"PIL\", \"torchvision\", \"cv2\")\n\n def read(self, images, side, img_dir=None):\n \"\"\"Read data into dicts.\n\n Args:\n images (str or Iterable[str]): Sequence of image paths or\n path to file containing audio paths.\n In either case, the filenames may be relative to ``src_dir``\n (default behavior) or absolute.\n side (str): Prefix used in return dict. Usually\n ``\"src\"`` or ``\"tgt\"``.\n img_dir (str): Location of source image files. 
See ``images``.\n\n Yields:\n a dictionary containing image data, path and index for each line.\n \"\"\"\n if isinstance(images, str):\n images = DataReaderBase._read_file(images)\n\n for i, filename in enumerate(images):\n filename = filename.decode(\"utf-8\").strip()\n img_path = os.path.join(img_dir, filename)\n if not os.path.exists(img_path):\n img_path = filename\n\n assert os.path.exists(img_path), \\\n 'img path %s not found' % filename\n\n if self.channel_size == 1:\n img = transforms.ToTensor()(\n Image.fromarray(cv2.imread(img_path, 0)))\n else:\n img = transforms.ToTensor()(Image.open(img_path))\n if self.truncate and self.truncate != (0, 0):\n if not (img.size(1) <= self.truncate[0]\n and img.size(2) <= self.truncate[1]):\n continue\n yield {side: img, side + '_path': filename, 'indices': i}\n\n\ndef img_sort_key(ex):\n \"\"\"Sort using the size of the image: (width, height).\"\"\"\n return ex.src.size(2), ex.src.size(1)\n\n\ndef batch_img(data, vocab):\n \"\"\"Pad and batch a sequence of images.\"\"\"\n c = data[0].size(0)\n h = max([t.size(1) for t in data])\n w = max([t.size(2) for t in data])\n imgs = torch.zeros(len(data), c, h, w).fill_(1)\n for i, img in enumerate(data):\n imgs[i, :, 0:img.size(1), 0:img.size(2)] = img\n return imgs\n\n\ndef image_fields(**kwargs):\n img = Field(\n use_vocab=False, dtype=torch.float,\n postprocessing=batch_img, sequential=False)\n return img\n", "path": "onmt/inputters/image_dataset.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport os\n\nimport torch\nfrom torchtext.data import Field\n\nfrom onmt.inputters.datareader_base import DataReaderBase\n\n# domain specific dependencies\ntry:\n from PIL import Image\n from torchvision import transforms\n import cv2\nexcept ImportError:\n Image, transforms, cv2 = None, None, None\n\n\nclass ImageDataReader(DataReaderBase):\n \"\"\"Read image data from disk.\n\n Args:\n truncate (tuple[int] or NoneType): maximum img size. Use\n ``(0,0)`` or ``None`` for unlimited.\n channel_size (int): Number of channels per image.\n\n Raises:\n onmt.inputters.datareader_base.MissingDependencyException: If\n importing any of ``PIL``, ``torchvision``, or ``cv2`` fail.\n \"\"\"\n\n def __init__(self, truncate=None, channel_size=3):\n self._check_deps()\n self.truncate = truncate\n self.channel_size = channel_size\n\n @classmethod\n def from_opt(cls, opt):\n return cls(channel_size=opt.image_channel_size)\n\n @classmethod\n def _check_deps(cls):\n if any([Image is None, transforms is None, cv2 is None]):\n cls._raise_missing_dep(\n \"PIL\", \"torchvision\", \"cv2\")\n\n def read(self, images, side, img_dir=None):\n \"\"\"Read data into dicts.\n\n Args:\n images (str or Iterable[str]): Sequence of image paths or\n path to file containing audio paths.\n In either case, the filenames may be relative to ``src_dir``\n (default behavior) or absolute.\n side (str): Prefix used in return dict. Usually\n ``\"src\"`` or ``\"tgt\"``.\n img_dir (str): Location of source image files. 
See ``images``.\n\n Yields:\n a dictionary containing image data, path and index for each line.\n \"\"\"\n if isinstance(images, str):\n images = DataReaderBase._read_file(images)\n\n for i, filename in enumerate(images):\n filename = filename.decode(\"utf-8\").strip()\n img_path = os.path.join(img_dir, filename)\n if not os.path.exists(img_path):\n img_path = filename\n\n assert os.path.exists(img_path), \\\n 'img path %s not found' % filename\n\n if self.channel_size == 1:\n img = transforms.ToTensor()(\n Image.fromarray(cv2.imread(img_path, 0)))\n else:\n img = Image.open(img_path).convert('RGB')\n img = transforms.ToTensor()(img)\n if self.truncate and self.truncate != (0, 0):\n if not (img.size(1) <= self.truncate[0]\n and img.size(2) <= self.truncate[1]):\n continue\n yield {side: img, side + '_path': filename, 'indices': i}\n\n\ndef img_sort_key(ex):\n \"\"\"Sort using the size of the image: (width, height).\"\"\"\n return ex.src.size(2), ex.src.size(1)\n\n\ndef batch_img(data, vocab):\n \"\"\"Pad and batch a sequence of images.\"\"\"\n c = data[0].size(0)\n h = max([t.size(1) for t in data])\n w = max([t.size(2) for t in data])\n imgs = torch.zeros(len(data), c, h, w).fill_(1)\n for i, img in enumerate(data):\n imgs[i, :, 0:img.size(1), 0:img.size(2)] = img\n return imgs\n\n\ndef image_fields(**kwargs):\n img = Field(\n use_vocab=False, dtype=torch.float,\n postprocessing=batch_img, sequential=False)\n return img\n", "path": "onmt/inputters/image_dataset.py"}]} | 1,553 | 158 |
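The merged OpenNMT fix takes a slightly different route from the one proposed in the issue: instead of a second `cv2.imread` call, it normalizes every multi-channel image through PIL's `convert('RGB')` before `ToTensor()`. A minimal check of that behaviour, assuming PIL and torchvision are installed (the 8×8 images are illustrative stand-ins, not data from the record):

```python
# Sketch only: convert('RGB') collapses grayscale (L) and RGBA inputs to exactly
# three channels, so ToTensor() yields a (3, H, W) tensor for every input mode.
from PIL import Image
from torchvision import transforms

for mode in ("L", "RGBA", "RGB"):
    img = Image.new(mode, (8, 8))
    tensor = transforms.ToTensor()(img.convert("RGB"))
    print(mode, tuple(tensor.shape))  # shape is (3, 8, 8) for each mode
```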
gh_patches_debug_3789 | rasdani/github-patches | git_diff | pallets__werkzeug-1564 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Detect opera as browser in user_agent
### Expected Behavior
`user_agent.browser` is reported as `opera` and `user_agent.version` as `60` when browsing with Opera 60 on macOS 10.13.5.
```python
@web.route('/test')
def test():
browser = request.user_agent.browser
version = request.user_agent.version and int(request.user_agent.version.split('.')[0])
platform = request.user_agent.platform
uas = request.user_agent.string
    return jsonify(dict(browser=browser, version=version, platform=platform, uas=uas))
```
### Actual Behavior
When using **Opera** the report is as follows (note the 'OPR/60'):
```
{
"browser": "chrome",
"platform": "macos",
"uas": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36 OPR/60.0.3255.95",
"version": 73
}
```
When using **Chrome** the report is:
```
{
"browser": "chrome",
"platform": "macos",
"uas": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36",
"version": 74
}
```
### Environment
* Python version: 3.7.0
* Flask version: 1.0.2
* Werkzeug version: 0.14.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/werkzeug/useragents.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 werkzeug.useragents
4 ~~~~~~~~~~~~~~~~~~~
5
6 This module provides a helper to inspect user agent strings. This module
7 is far from complete but should work for most of the currently available
8 browsers.
9
10
11 :copyright: 2007 Pallets
12 :license: BSD-3-Clause
13 """
14 import re
15
16
17 class UserAgentParser(object):
18 """A simple user agent parser. Used by the `UserAgent`."""
19
20 platforms = (
21 ("cros", "chromeos"),
22 ("iphone|ios", "iphone"),
23 ("ipad", "ipad"),
24 (r"darwin|mac|os\s*x", "macos"),
25 ("win", "windows"),
26 (r"android", "android"),
27 ("netbsd", "netbsd"),
28 ("openbsd", "openbsd"),
29 ("freebsd", "freebsd"),
30 ("dragonfly", "dragonflybsd"),
31 ("(sun|i86)os", "solaris"),
32 (r"x11|lin(\b|ux)?", "linux"),
33 (r"nintendo\s+wii", "wii"),
34 ("irix", "irix"),
35 ("hp-?ux", "hpux"),
36 ("aix", "aix"),
37 ("sco|unix_sv", "sco"),
38 ("bsd", "bsd"),
39 ("amiga", "amiga"),
40 ("blackberry|playbook", "blackberry"),
41 ("symbian", "symbian"),
42 )
43 browsers = (
44 ("googlebot", "google"),
45 ("msnbot", "msn"),
46 ("yahoo", "yahoo"),
47 ("ask jeeves", "ask"),
48 (r"aol|america\s+online\s+browser", "aol"),
49 ("opera", "opera"),
50 ("edge", "edge"),
51 ("chrome|crios", "chrome"),
52 ("seamonkey", "seamonkey"),
53 ("firefox|firebird|phoenix|iceweasel", "firefox"),
54 ("galeon", "galeon"),
55 ("safari|version", "safari"),
56 ("webkit", "webkit"),
57 ("camino", "camino"),
58 ("konqueror", "konqueror"),
59 ("k-meleon", "kmeleon"),
60 ("netscape", "netscape"),
61 (r"msie|microsoft\s+internet\s+explorer|trident/.+? rv:", "msie"),
62 ("lynx", "lynx"),
63 ("links", "links"),
64 ("Baiduspider", "baidu"),
65 ("bingbot", "bing"),
66 ("mozilla", "mozilla"),
67 )
68
69 _browser_version_re = r"(?:%s)[/\sa-z(]*(\d+[.\da-z]+)?"
70 _language_re = re.compile(
71 r"(?:;\s*|\s+)(\b\w{2}\b(?:-\b\w{2}\b)?)\s*;|"
72 r"(?:\(|\[|;)\s*(\b\w{2}\b(?:-\b\w{2}\b)?)\s*(?:\]|\)|;)"
73 )
74
75 def __init__(self):
76 self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms]
77 self.browsers = [
78 (b, re.compile(self._browser_version_re % a, re.I))
79 for a, b in self.browsers
80 ]
81
82 def __call__(self, user_agent):
83 for platform, regex in self.platforms: # noqa: B007
84 match = regex.search(user_agent)
85 if match is not None:
86 break
87 else:
88 platform = None
89 for browser, regex in self.browsers: # noqa: B007
90 match = regex.search(user_agent)
91 if match is not None:
92 version = match.group(1)
93 break
94 else:
95 browser = version = None
96 match = self._language_re.search(user_agent)
97 if match is not None:
98 language = match.group(1) or match.group(2)
99 else:
100 language = None
101 return platform, browser, version, language
102
103
104 class UserAgent(object):
105 """Represents a user agent. Pass it a WSGI environment or a user agent
106 string and you can inspect some of the details from the user agent
107 string via the attributes. The following attributes exist:
108
109 .. attribute:: string
110
111 the raw user agent string
112
113 .. attribute:: platform
114
115 the browser platform. The following platforms are currently
116 recognized:
117
118 - `aix`
119 - `amiga`
120 - `android`
121 - `blackberry`
122 - `bsd`
123 - `chromeos`
124 - `dragonflybsd`
125 - `freebsd`
126 - `hpux`
127 - `ipad`
128 - `iphone`
129 - `irix`
130 - `linux`
131 - `macos`
132 - `netbsd`
133 - `openbsd`
134 - `sco`
135 - `solaris`
136 - `symbian`
137 - `wii`
138 - `windows`
139
140 .. attribute:: browser
141
142 the name of the browser. The following browsers are currently
143 recognized:
144
145 - `aol` *
146 - `ask` *
147 - `baidu` *
148 - `bing` *
149 - `camino`
150 - `chrome`
151 - `edge`
152 - `firefox`
153 - `galeon`
154 - `google` *
155 - `kmeleon`
156 - `konqueror`
157 - `links`
158 - `lynx`
159 - `mozilla`
160 - `msie`
161 - `msn`
162 - `netscape`
163 - `opera`
164 - `safari`
165 - `seamonkey`
166 - `webkit`
167 - `yahoo` *
168
169 (Browsers marked with a star (``*``) are crawlers.)
170
171 .. attribute:: version
172
173 the version of the browser
174
175 .. attribute:: language
176
177 the language of the browser
178 """
179
180 _parser = UserAgentParser()
181
182 def __init__(self, environ_or_string):
183 if isinstance(environ_or_string, dict):
184 environ_or_string = environ_or_string.get("HTTP_USER_AGENT", "")
185 self.string = environ_or_string
186 self.platform, self.browser, self.version, self.language = self._parser(
187 environ_or_string
188 )
189
190 def to_header(self):
191 return self.string
192
193 def __str__(self):
194 return self.string
195
196 def __nonzero__(self):
197 return bool(self.browser)
198
199 __bool__ = __nonzero__
200
201 def __repr__(self):
202 return "<%s %r/%s>" % (self.__class__.__name__, self.browser, self.version)
203
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/werkzeug/useragents.py b/src/werkzeug/useragents.py
--- a/src/werkzeug/useragents.py
+++ b/src/werkzeug/useragents.py
@@ -46,7 +46,7 @@
("yahoo", "yahoo"),
("ask jeeves", "ask"),
(r"aol|america\s+online\s+browser", "aol"),
- ("opera", "opera"),
+ (r"opera|opr", "opera"),
("edge", "edge"),
("chrome|crios", "chrome"),
("seamonkey", "seamonkey"),
| {"golden_diff": "diff --git a/src/werkzeug/useragents.py b/src/werkzeug/useragents.py\n--- a/src/werkzeug/useragents.py\n+++ b/src/werkzeug/useragents.py\n@@ -46,7 +46,7 @@\n (\"yahoo\", \"yahoo\"),\n (\"ask jeeves\", \"ask\"),\n (r\"aol|america\\s+online\\s+browser\", \"aol\"),\n- (\"opera\", \"opera\"),\n+ (r\"opera|opr\", \"opera\"),\n (\"edge\", \"edge\"),\n (\"chrome|crios\", \"chrome\"),\n (\"seamonkey\", \"seamonkey\"),\n", "issue": "Detect opera as browser in user_agent\n### Expected Behavior\r\n\r\n`user_agent.browser` is reported as `opera` and `user_agent.version` as `60` when browsing using Opera 60 in MacOs 10.13.5.\r\n\r\n```python\r\[email protected]('/test')\r\ndef test():\r\n browser = request.user_agent.browser\r\n version = request.user_agent.version and int(request.user_agent.version.split('.')[0])\r\n platform = request.user_agent.platform\r\n uas = request.user_agent.string\r\n jsonify(dict(browser=browser, version=version, platform=platform, uas=uas))\r\n```\r\n\r\n### Actual Behavior\r\n\r\nWhen using **Opera** the report is as follows (note the 'OPR/60'):\r\n\r\n```\r\n{\r\n \"browser\": \"chrome\", \r\n \"platform\": \"macos\", \r\n \"uas\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36 OPR/60.0.3255.95\", \r\n \"version\": 73\r\n}\r\n```\r\n\r\nWhen using **Chrome** the report is:\r\n\r\n```\r\n{\r\n \"browser\": \"chrome\", \r\n \"platform\": \"macos\", \r\n \"uas\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36\", \r\n \"version\": 74\r\n}\r\n```\r\n\r\n### Environment\r\n\r\n* Python version: 3.7.0\r\n* Flask version: 1.0.2\r\n* Werkzeug version: 0.14.1\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n werkzeug.useragents\n ~~~~~~~~~~~~~~~~~~~\n\n This module provides a helper to inspect user agent strings. This module\n is far from complete but should work for most of the currently available\n browsers.\n\n\n :copyright: 2007 Pallets\n :license: BSD-3-Clause\n\"\"\"\nimport re\n\n\nclass UserAgentParser(object):\n \"\"\"A simple user agent parser. Used by the `UserAgent`.\"\"\"\n\n platforms = (\n (\"cros\", \"chromeos\"),\n (\"iphone|ios\", \"iphone\"),\n (\"ipad\", \"ipad\"),\n (r\"darwin|mac|os\\s*x\", \"macos\"),\n (\"win\", \"windows\"),\n (r\"android\", \"android\"),\n (\"netbsd\", \"netbsd\"),\n (\"openbsd\", \"openbsd\"),\n (\"freebsd\", \"freebsd\"),\n (\"dragonfly\", \"dragonflybsd\"),\n (\"(sun|i86)os\", \"solaris\"),\n (r\"x11|lin(\\b|ux)?\", \"linux\"),\n (r\"nintendo\\s+wii\", \"wii\"),\n (\"irix\", \"irix\"),\n (\"hp-?ux\", \"hpux\"),\n (\"aix\", \"aix\"),\n (\"sco|unix_sv\", \"sco\"),\n (\"bsd\", \"bsd\"),\n (\"amiga\", \"amiga\"),\n (\"blackberry|playbook\", \"blackberry\"),\n (\"symbian\", \"symbian\"),\n )\n browsers = (\n (\"googlebot\", \"google\"),\n (\"msnbot\", \"msn\"),\n (\"yahoo\", \"yahoo\"),\n (\"ask jeeves\", \"ask\"),\n (r\"aol|america\\s+online\\s+browser\", \"aol\"),\n (\"opera\", \"opera\"),\n (\"edge\", \"edge\"),\n (\"chrome|crios\", \"chrome\"),\n (\"seamonkey\", \"seamonkey\"),\n (\"firefox|firebird|phoenix|iceweasel\", \"firefox\"),\n (\"galeon\", \"galeon\"),\n (\"safari|version\", \"safari\"),\n (\"webkit\", \"webkit\"),\n (\"camino\", \"camino\"),\n (\"konqueror\", \"konqueror\"),\n (\"k-meleon\", \"kmeleon\"),\n (\"netscape\", \"netscape\"),\n (r\"msie|microsoft\\s+internet\\s+explorer|trident/.+? 
rv:\", \"msie\"),\n (\"lynx\", \"lynx\"),\n (\"links\", \"links\"),\n (\"Baiduspider\", \"baidu\"),\n (\"bingbot\", \"bing\"),\n (\"mozilla\", \"mozilla\"),\n )\n\n _browser_version_re = r\"(?:%s)[/\\sa-z(]*(\\d+[.\\da-z]+)?\"\n _language_re = re.compile(\n r\"(?:;\\s*|\\s+)(\\b\\w{2}\\b(?:-\\b\\w{2}\\b)?)\\s*;|\"\n r\"(?:\\(|\\[|;)\\s*(\\b\\w{2}\\b(?:-\\b\\w{2}\\b)?)\\s*(?:\\]|\\)|;)\"\n )\n\n def __init__(self):\n self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms]\n self.browsers = [\n (b, re.compile(self._browser_version_re % a, re.I))\n for a, b in self.browsers\n ]\n\n def __call__(self, user_agent):\n for platform, regex in self.platforms: # noqa: B007\n match = regex.search(user_agent)\n if match is not None:\n break\n else:\n platform = None\n for browser, regex in self.browsers: # noqa: B007\n match = regex.search(user_agent)\n if match is not None:\n version = match.group(1)\n break\n else:\n browser = version = None\n match = self._language_re.search(user_agent)\n if match is not None:\n language = match.group(1) or match.group(2)\n else:\n language = None\n return platform, browser, version, language\n\n\nclass UserAgent(object):\n \"\"\"Represents a user agent. Pass it a WSGI environment or a user agent\n string and you can inspect some of the details from the user agent\n string via the attributes. The following attributes exist:\n\n .. attribute:: string\n\n the raw user agent string\n\n .. attribute:: platform\n\n the browser platform. The following platforms are currently\n recognized:\n\n - `aix`\n - `amiga`\n - `android`\n - `blackberry`\n - `bsd`\n - `chromeos`\n - `dragonflybsd`\n - `freebsd`\n - `hpux`\n - `ipad`\n - `iphone`\n - `irix`\n - `linux`\n - `macos`\n - `netbsd`\n - `openbsd`\n - `sco`\n - `solaris`\n - `symbian`\n - `wii`\n - `windows`\n\n .. attribute:: browser\n\n the name of the browser. The following browsers are currently\n recognized:\n\n - `aol` *\n - `ask` *\n - `baidu` *\n - `bing` *\n - `camino`\n - `chrome`\n - `edge`\n - `firefox`\n - `galeon`\n - `google` *\n - `kmeleon`\n - `konqueror`\n - `links`\n - `lynx`\n - `mozilla`\n - `msie`\n - `msn`\n - `netscape`\n - `opera`\n - `safari`\n - `seamonkey`\n - `webkit`\n - `yahoo` *\n\n (Browsers marked with a star (``*``) are crawlers.)\n\n .. attribute:: version\n\n the version of the browser\n\n .. attribute:: language\n\n the language of the browser\n \"\"\"\n\n _parser = UserAgentParser()\n\n def __init__(self, environ_or_string):\n if isinstance(environ_or_string, dict):\n environ_or_string = environ_or_string.get(\"HTTP_USER_AGENT\", \"\")\n self.string = environ_or_string\n self.platform, self.browser, self.version, self.language = self._parser(\n environ_or_string\n )\n\n def to_header(self):\n return self.string\n\n def __str__(self):\n return self.string\n\n def __nonzero__(self):\n return bool(self.browser)\n\n __bool__ = __nonzero__\n\n def __repr__(self):\n return \"<%s %r/%s>\" % (self.__class__.__name__, self.browser, self.version)\n", "path": "src/werkzeug/useragents.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n werkzeug.useragents\n ~~~~~~~~~~~~~~~~~~~\n\n This module provides a helper to inspect user agent strings. This module\n is far from complete but should work for most of the currently available\n browsers.\n\n\n :copyright: 2007 Pallets\n :license: BSD-3-Clause\n\"\"\"\nimport re\n\n\nclass UserAgentParser(object):\n \"\"\"A simple user agent parser. 
Used by the `UserAgent`.\"\"\"\n\n platforms = (\n (\"cros\", \"chromeos\"),\n (\"iphone|ios\", \"iphone\"),\n (\"ipad\", \"ipad\"),\n (r\"darwin|mac|os\\s*x\", \"macos\"),\n (\"win\", \"windows\"),\n (r\"android\", \"android\"),\n (\"netbsd\", \"netbsd\"),\n (\"openbsd\", \"openbsd\"),\n (\"freebsd\", \"freebsd\"),\n (\"dragonfly\", \"dragonflybsd\"),\n (\"(sun|i86)os\", \"solaris\"),\n (r\"x11|lin(\\b|ux)?\", \"linux\"),\n (r\"nintendo\\s+wii\", \"wii\"),\n (\"irix\", \"irix\"),\n (\"hp-?ux\", \"hpux\"),\n (\"aix\", \"aix\"),\n (\"sco|unix_sv\", \"sco\"),\n (\"bsd\", \"bsd\"),\n (\"amiga\", \"amiga\"),\n (\"blackberry|playbook\", \"blackberry\"),\n (\"symbian\", \"symbian\"),\n )\n browsers = (\n (\"googlebot\", \"google\"),\n (\"msnbot\", \"msn\"),\n (\"yahoo\", \"yahoo\"),\n (\"ask jeeves\", \"ask\"),\n (r\"aol|america\\s+online\\s+browser\", \"aol\"),\n (r\"opera|opr\", \"opera\"),\n (\"edge\", \"edge\"),\n (\"chrome|crios\", \"chrome\"),\n (\"seamonkey\", \"seamonkey\"),\n (\"firefox|firebird|phoenix|iceweasel\", \"firefox\"),\n (\"galeon\", \"galeon\"),\n (\"safari|version\", \"safari\"),\n (\"webkit\", \"webkit\"),\n (\"camino\", \"camino\"),\n (\"konqueror\", \"konqueror\"),\n (\"k-meleon\", \"kmeleon\"),\n (\"netscape\", \"netscape\"),\n (r\"msie|microsoft\\s+internet\\s+explorer|trident/.+? rv:\", \"msie\"),\n (\"lynx\", \"lynx\"),\n (\"links\", \"links\"),\n (\"Baiduspider\", \"baidu\"),\n (\"bingbot\", \"bing\"),\n (\"mozilla\", \"mozilla\"),\n )\n\n _browser_version_re = r\"(?:%s)[/\\sa-z(]*(\\d+[.\\da-z]+)?\"\n _language_re = re.compile(\n r\"(?:;\\s*|\\s+)(\\b\\w{2}\\b(?:-\\b\\w{2}\\b)?)\\s*;|\"\n r\"(?:\\(|\\[|;)\\s*(\\b\\w{2}\\b(?:-\\b\\w{2}\\b)?)\\s*(?:\\]|\\)|;)\"\n )\n\n def __init__(self):\n self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms]\n self.browsers = [\n (b, re.compile(self._browser_version_re % a, re.I))\n for a, b in self.browsers\n ]\n\n def __call__(self, user_agent):\n for platform, regex in self.platforms: # noqa: B007\n match = regex.search(user_agent)\n if match is not None:\n break\n else:\n platform = None\n for browser, regex in self.browsers: # noqa: B007\n match = regex.search(user_agent)\n if match is not None:\n version = match.group(1)\n break\n else:\n browser = version = None\n match = self._language_re.search(user_agent)\n if match is not None:\n language = match.group(1) or match.group(2)\n else:\n language = None\n return platform, browser, version, language\n\n\nclass UserAgent(object):\n \"\"\"Represents a user agent. Pass it a WSGI environment or a user agent\n string and you can inspect some of the details from the user agent\n string via the attributes. The following attributes exist:\n\n .. attribute:: string\n\n the raw user agent string\n\n .. attribute:: platform\n\n the browser platform. The following platforms are currently\n recognized:\n\n - `aix`\n - `amiga`\n - `android`\n - `blackberry`\n - `bsd`\n - `chromeos`\n - `dragonflybsd`\n - `freebsd`\n - `hpux`\n - `ipad`\n - `iphone`\n - `irix`\n - `linux`\n - `macos`\n - `netbsd`\n - `openbsd`\n - `sco`\n - `solaris`\n - `symbian`\n - `wii`\n - `windows`\n\n .. attribute:: browser\n\n the name of the browser. 
The following browsers are currently\n recognized:\n\n - `aol` *\n - `ask` *\n - `baidu` *\n - `bing` *\n - `camino`\n - `chrome`\n - `edge`\n - `firefox`\n - `galeon`\n - `google` *\n - `kmeleon`\n - `konqueror`\n - `links`\n - `lynx`\n - `mozilla`\n - `msie`\n - `msn`\n - `netscape`\n - `opera`\n - `safari`\n - `seamonkey`\n - `webkit`\n - `yahoo` *\n\n (Browsers marked with a star (``*``) are crawlers.)\n\n .. attribute:: version\n\n the version of the browser\n\n .. attribute:: language\n\n the language of the browser\n \"\"\"\n\n _parser = UserAgentParser()\n\n def __init__(self, environ_or_string):\n if isinstance(environ_or_string, dict):\n environ_or_string = environ_or_string.get(\"HTTP_USER_AGENT\", \"\")\n self.string = environ_or_string\n self.platform, self.browser, self.version, self.language = self._parser(\n environ_or_string\n )\n\n def to_header(self):\n return self.string\n\n def __str__(self):\n return self.string\n\n def __nonzero__(self):\n return bool(self.browser)\n\n __bool__ = __nonzero__\n\n def __repr__(self):\n return \"<%s %r/%s>\" % (self.__class__.__name__, self.browser, self.version)\n", "path": "src/werkzeug/useragents.py"}]} | 2,744 | 140 |
gh_patches_debug_39793 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-2965 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider tijuanaflats is broken
During the global build at 2021-05-26-14-42-23, spider **tijuanaflats** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tijuanaflats.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tijuanaflats.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tijuanaflats.geojson))
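A zero-feature, zero-error run usually means the crawl's entry point quietly returned nothing. A quick way to confirm that before digging into the parse logic is to fetch the sitemap the spider starts from and count its `<loc>` entries (a minimal sketch, assuming `requests` is installed; the URL is the one hard-coded in the spider below):

```python
import requests

# Hypothetical smoke test for the spider's start URL: an empty response,
# a redirect, or a 404 here would explain a crawl that yields 0 features
# without raising any errors.
url = "https://tijuanaflats.com/wpsl_stores-sitemap.xml"
resp = requests.get(url, timeout=10)
print(resp.status_code)
print(resp.text.count("<loc>"))  # 0 means no store pages are being discovered
```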
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/tijuanaflats.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import re
4
5 from locations.items import GeojsonPointItem
6
7
8 class TijuanaFlatsSpider(scrapy.Spider):
9 name = "tijuanaflats"
10 item_attributes = { 'brand': "Tijuana Flats" }
11 allowed_domains = ['tijuanaflats.com']
12 start_urls = (
13 'https://tijuanaflats.com/wpsl_stores-sitemap.xml',
14 )
15
16 def parse(self, response):
17 response.selector.remove_namespaces()
18 city_urls = response.xpath('//url/loc/text()').extract()
19 for path in city_urls:
20 yield scrapy.Request(
21 path.strip(),
22 callback=self.parse_store,
23 )
24
25 def parse_store(self, response):
26
27 if response.xpath('//table[@class="wpsl-opening-hours"]/tr').extract():
28 storeHours = str(response.xpath('//table[@class="wpsl-opening-hours"]/tr').extract())
29 storeHours = storeHours.replace('[','').replace(']','').replace("'",'').replace(',',' - ')
30 else:
31 storeHours = response.xpath('//table[@class="wpsl-opening-hours"]/tr').extract()
32
33
34 properties = {
35 'name': response.xpath('//h1[@class="entry-title"]/text()').extract_first(),
36 'website': response.request.url,
37 'ref': response.xpath('//h1[@class="entry-title"]/text()').extract_first(),
38 'addr_full': response.xpath('//div[@class="wpsl-location-address"]/span[1]/text()').extract_first() + " " + response.xpath('//div[@class="wpsl-location-address"]/span[2]/text()').extract_first(),
39 'city': response.xpath('//div[@class="wpsl-location-address"]/span[3]/text()').extract_first().rstrip(', '),
40 'state': response.xpath('//div[@class="wpsl-location-address"]/span[4]/text()').extract_first().strip(),
41 'postcode': response.xpath('//div[@class="wpsl-location-address"]/span[5]/text()').extract_first().strip(),
42 'opening_hours': storeHours,
43 'lat': float(response.xpath('//script/text()').extract()[-3].split('"lat":"')[1].split('"')[0]),
44 'lon': float(response.xpath('//script/text()').extract()[-3].split('"lng":"')[1].split('"')[0]),
45 }
46
47 yield GeojsonPointItem(**properties)
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/tijuanaflats.py b/locations/spiders/tijuanaflats.py
--- a/locations/spiders/tijuanaflats.py
+++ b/locations/spiders/tijuanaflats.py
@@ -1,47 +1,45 @@
# -*- coding: utf-8 -*-
+import json
+
import scrapy
-import re
from locations.items import GeojsonPointItem
class TijuanaFlatsSpider(scrapy.Spider):
name = "tijuanaflats"
- item_attributes = { 'brand': "Tijuana Flats" }
- allowed_domains = ['tijuanaflats.com']
- start_urls = (
- 'https://tijuanaflats.com/wpsl_stores-sitemap.xml',
- )
+ item_attributes = {"brand": "Tijuana Flats", "brand_wikidata": "Q7801833"}
+ allowed_domains = ["tijuanaflats.com"]
+ start_urls = ("https://www.tijuanaflats.com/locations",)
def parse(self, response):
- response.selector.remove_namespaces()
- city_urls = response.xpath('//url/loc/text()').extract()
- for path in city_urls:
- yield scrapy.Request(
- path.strip(),
- callback=self.parse_store,
+ data = json.loads(
+ response.xpath(
+ '//tjs-view-locations/attribute::*[name()=":locations"]'
+ ).extract_first()
+ )
+ for row in data:
+ for ent in row["yoast_json_ld"][0]["@graph"]:
+ if ent["@type"] == "WebPage" and row["slug"] in ent["url"]:
+ name = ent["name"]
+
+ # extract text from html snippet
+ hours_of_operation = scrapy.Selector(text=row["acf"]["hours_of_operation"])
+ opening_hours = "; ".join(
+ a.strip() for a in hours_of_operation.xpath("//text()").extract()
)
- def parse_store(self, response):
-
- if response.xpath('//table[@class="wpsl-opening-hours"]/tr').extract():
- storeHours = str(response.xpath('//table[@class="wpsl-opening-hours"]/tr').extract())
- storeHours = storeHours.replace('[','').replace(']','').replace("'",'').replace(',',' - ')
- else:
- storeHours = response.xpath('//table[@class="wpsl-opening-hours"]/tr').extract()
-
-
- properties = {
- 'name': response.xpath('//h1[@class="entry-title"]/text()').extract_first(),
- 'website': response.request.url,
- 'ref': response.xpath('//h1[@class="entry-title"]/text()').extract_first(),
- 'addr_full': response.xpath('//div[@class="wpsl-location-address"]/span[1]/text()').extract_first() + " " + response.xpath('//div[@class="wpsl-location-address"]/span[2]/text()').extract_first(),
- 'city': response.xpath('//div[@class="wpsl-location-address"]/span[3]/text()').extract_first().rstrip(', '),
- 'state': response.xpath('//div[@class="wpsl-location-address"]/span[4]/text()').extract_first().strip(),
- 'postcode': response.xpath('//div[@class="wpsl-location-address"]/span[5]/text()').extract_first().strip(),
- 'opening_hours': storeHours,
- 'lat': float(response.xpath('//script/text()').extract()[-3].split('"lat":"')[1].split('"')[0]),
- 'lon': float(response.xpath('//script/text()').extract()[-3].split('"lng":"')[1].split('"')[0]),
- }
-
- yield GeojsonPointItem(**properties)
\ No newline at end of file
+ properties = {
+ "ref": row["slug"],
+ "name": name,
+ "lat": row["acf"]["physical_location"]["lat"],
+ "lon": row["acf"]["physical_location"]["lng"],
+ "addr_full": row["acf"]["address_1"],
+ "city": row["acf"]["city"],
+ "state": row["acf"]["state"],
+ "postcode": row["acf"]["zip"],
+ "phone": row["acf"]["contact_phone"],
+ "website": f'https://www.tijuanaflats.com/locations/{row["slug"]}',
+ "opening_hours": opening_hours,
+ }
+ yield GeojsonPointItem(**properties)
| {"golden_diff": "diff --git a/locations/spiders/tijuanaflats.py b/locations/spiders/tijuanaflats.py\n--- a/locations/spiders/tijuanaflats.py\n+++ b/locations/spiders/tijuanaflats.py\n@@ -1,47 +1,45 @@\n # -*- coding: utf-8 -*-\n+import json\n+\n import scrapy\n-import re\n \n from locations.items import GeojsonPointItem\n \n \n class TijuanaFlatsSpider(scrapy.Spider):\n name = \"tijuanaflats\"\n- item_attributes = { 'brand': \"Tijuana Flats\" }\n- allowed_domains = ['tijuanaflats.com']\n- start_urls = (\n- 'https://tijuanaflats.com/wpsl_stores-sitemap.xml',\n- )\n+ item_attributes = {\"brand\": \"Tijuana Flats\", \"brand_wikidata\": \"Q7801833\"}\n+ allowed_domains = [\"tijuanaflats.com\"]\n+ start_urls = (\"https://www.tijuanaflats.com/locations\",)\n \n def parse(self, response):\n- response.selector.remove_namespaces()\n- city_urls = response.xpath('//url/loc/text()').extract()\n- for path in city_urls:\n- yield scrapy.Request(\n- path.strip(),\n- callback=self.parse_store,\n+ data = json.loads(\n+ response.xpath(\n+ '//tjs-view-locations/attribute::*[name()=\":locations\"]'\n+ ).extract_first()\n+ )\n+ for row in data:\n+ for ent in row[\"yoast_json_ld\"][0][\"@graph\"]:\n+ if ent[\"@type\"] == \"WebPage\" and row[\"slug\"] in ent[\"url\"]:\n+ name = ent[\"name\"]\n+\n+ # extract text from html snippet\n+ hours_of_operation = scrapy.Selector(text=row[\"acf\"][\"hours_of_operation\"])\n+ opening_hours = \"; \".join(\n+ a.strip() for a in hours_of_operation.xpath(\"//text()\").extract()\n )\n \n- def parse_store(self, response):\n-\n- if response.xpath('//table[@class=\"wpsl-opening-hours\"]/tr').extract():\n- storeHours = str(response.xpath('//table[@class=\"wpsl-opening-hours\"]/tr').extract())\n- storeHours = storeHours.replace('[','').replace(']','').replace(\"'\",'').replace(',',' - ')\n- else:\n- storeHours = response.xpath('//table[@class=\"wpsl-opening-hours\"]/tr').extract()\n-\n-\n- properties = {\n- 'name': response.xpath('//h1[@class=\"entry-title\"]/text()').extract_first(),\n- 'website': response.request.url,\n- 'ref': response.xpath('//h1[@class=\"entry-title\"]/text()').extract_first(),\n- 'addr_full': response.xpath('//div[@class=\"wpsl-location-address\"]/span[1]/text()').extract_first() + \" \" + response.xpath('//div[@class=\"wpsl-location-address\"]/span[2]/text()').extract_first(),\n- 'city': response.xpath('//div[@class=\"wpsl-location-address\"]/span[3]/text()').extract_first().rstrip(', '),\n- 'state': response.xpath('//div[@class=\"wpsl-location-address\"]/span[4]/text()').extract_first().strip(),\n- 'postcode': response.xpath('//div[@class=\"wpsl-location-address\"]/span[5]/text()').extract_first().strip(),\n- 'opening_hours': storeHours,\n- 'lat': float(response.xpath('//script/text()').extract()[-3].split('\"lat\":\"')[1].split('\"')[0]),\n- 'lon': float(response.xpath('//script/text()').extract()[-3].split('\"lng\":\"')[1].split('\"')[0]),\n- }\n-\n- yield GeojsonPointItem(**properties)\n\\ No newline at end of file\n+ properties = {\n+ \"ref\": row[\"slug\"],\n+ \"name\": name,\n+ \"lat\": row[\"acf\"][\"physical_location\"][\"lat\"],\n+ \"lon\": row[\"acf\"][\"physical_location\"][\"lng\"],\n+ \"addr_full\": row[\"acf\"][\"address_1\"],\n+ \"city\": row[\"acf\"][\"city\"],\n+ \"state\": row[\"acf\"][\"state\"],\n+ \"postcode\": row[\"acf\"][\"zip\"],\n+ \"phone\": row[\"acf\"][\"contact_phone\"],\n+ \"website\": f'https://www.tijuanaflats.com/locations/{row[\"slug\"]}',\n+ \"opening_hours\": opening_hours,\n+ }\n+ yield GeojsonPointItem(**properties)\n", 
"issue": "Spider tijuanaflats is broken\nDuring the global build at 2021-05-26-14-42-23, spider **tijuanaflats** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tijuanaflats.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tijuanaflats.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tijuanaflats.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport re\n\nfrom locations.items import GeojsonPointItem\n\n\nclass TijuanaFlatsSpider(scrapy.Spider):\n name = \"tijuanaflats\"\n item_attributes = { 'brand': \"Tijuana Flats\" }\n allowed_domains = ['tijuanaflats.com']\n start_urls = (\n 'https://tijuanaflats.com/wpsl_stores-sitemap.xml',\n )\n\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n for path in city_urls:\n yield scrapy.Request(\n path.strip(),\n callback=self.parse_store,\n )\n\n def parse_store(self, response):\n\n if response.xpath('//table[@class=\"wpsl-opening-hours\"]/tr').extract():\n storeHours = str(response.xpath('//table[@class=\"wpsl-opening-hours\"]/tr').extract())\n storeHours = storeHours.replace('[','').replace(']','').replace(\"'\",'').replace(',',' - ')\n else:\n storeHours = response.xpath('//table[@class=\"wpsl-opening-hours\"]/tr').extract()\n\n\n properties = {\n 'name': response.xpath('//h1[@class=\"entry-title\"]/text()').extract_first(),\n 'website': response.request.url,\n 'ref': response.xpath('//h1[@class=\"entry-title\"]/text()').extract_first(),\n 'addr_full': response.xpath('//div[@class=\"wpsl-location-address\"]/span[1]/text()').extract_first() + \" \" + response.xpath('//div[@class=\"wpsl-location-address\"]/span[2]/text()').extract_first(),\n 'city': response.xpath('//div[@class=\"wpsl-location-address\"]/span[3]/text()').extract_first().rstrip(', '),\n 'state': response.xpath('//div[@class=\"wpsl-location-address\"]/span[4]/text()').extract_first().strip(),\n 'postcode': response.xpath('//div[@class=\"wpsl-location-address\"]/span[5]/text()').extract_first().strip(),\n 'opening_hours': storeHours,\n 'lat': float(response.xpath('//script/text()').extract()[-3].split('\"lat\":\"')[1].split('\"')[0]),\n 'lon': float(response.xpath('//script/text()').extract()[-3].split('\"lng\":\"')[1].split('\"')[0]),\n }\n\n yield GeojsonPointItem(**properties)", "path": "locations/spiders/tijuanaflats.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\n\n\nclass TijuanaFlatsSpider(scrapy.Spider):\n name = \"tijuanaflats\"\n item_attributes = {\"brand\": \"Tijuana Flats\", \"brand_wikidata\": \"Q7801833\"}\n allowed_domains = [\"tijuanaflats.com\"]\n start_urls = (\"https://www.tijuanaflats.com/locations\",)\n\n def parse(self, response):\n data = json.loads(\n response.xpath(\n '//tjs-view-locations/attribute::*[name()=\":locations\"]'\n ).extract_first()\n )\n for row in data:\n for ent in row[\"yoast_json_ld\"][0][\"@graph\"]:\n if ent[\"@type\"] == \"WebPage\" and row[\"slug\"] in ent[\"url\"]:\n name = ent[\"name\"]\n\n # extract text from html snippet\n hours_of_operation = scrapy.Selector(text=row[\"acf\"][\"hours_of_operation\"])\n opening_hours = \"; \".join(\n a.strip() for a in hours_of_operation.xpath(\"//text()\").extract()\n )\n\n properties = {\n \"ref\": 
row[\"slug\"],\n \"name\": name,\n \"lat\": row[\"acf\"][\"physical_location\"][\"lat\"],\n \"lon\": row[\"acf\"][\"physical_location\"][\"lng\"],\n \"addr_full\": row[\"acf\"][\"address_1\"],\n \"city\": row[\"acf\"][\"city\"],\n \"state\": row[\"acf\"][\"state\"],\n \"postcode\": row[\"acf\"][\"zip\"],\n \"phone\": row[\"acf\"][\"contact_phone\"],\n \"website\": f'https://www.tijuanaflats.com/locations/{row[\"slug\"]}',\n \"opening_hours\": opening_hours,\n }\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/tijuanaflats.py"}]} | 1,075 | 1,003 |
gh_patches_debug_836 | rasdani/github-patches | git_diff | lutris__lutris-2472 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Don't show Steam Linux Runtime when importing games
Link to the tool on steamdb: https://steamdb.info/app/1070560/
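The Steam Linux Runtime is installed like an ordinary app, so it gets its own `appmanifest_1070560.acf` under `steamapps/` and is picked up by anything that walks those manifest files. A quick check of what Steam records for it (a sketch only; the library path below is an assumption and may differ per install):

```python
from pathlib import Path

# Hypothetical check: print the "name" field of the runtime's manifest to
# confirm it is this tool that shows up in the import list as a "game".
manifest = Path.home() / ".steam/steam/steamapps/appmanifest_1070560.acf"
if manifest.exists():
    for line in manifest.read_text().splitlines():
        if '"name"' in line:
            print(line.strip())  # e.g.  "name"  "Steam Linux Runtime"
```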
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lutris/services/steam.py`
Content:
```
1 """Steam service"""
2 import os
3 import re
4
5 from lutris import pga
6 from lutris.config import make_game_config_id, LutrisConfig
7 from lutris.util.steam.appmanifest import AppManifest, get_appmanifests
8 from lutris.util.steam.config import get_steamapps_paths
9 from lutris.services.service_game import ServiceGame
10
11 NAME = "Steam"
12 ICON = "steam"
13 ONLINE = False
14
15
16 class SteamGame(ServiceGame):
17 """ServiceGame for Steam games"""
18
19 store = "steam"
20 installer_slug = "steam"
21 excluded_appids = [
22 "228980", # Steamworks Common Redistributables
23 ]
24
25 @classmethod
26 def new_from_steam_game(cls, appmanifest, game_id=None):
27 """Return a Steam game instance from an AppManifest"""
28 steam_game = SteamGame()
29 steam_game.appid = str(appmanifest.steamid)
30 steam_game.game_id = game_id
31 steam_game.name = appmanifest.name
32 steam_game.slug = appmanifest.slug
33 steam_game.runner = appmanifest.get_runner_name()
34 return steam_game
35
36 @classmethod
37 def new_from_lutris_id(cls, game_id):
38 steam_game = SteamGame()
39 steam_game.game_id = game_id
40 return steam_game
41
42 @property
43 def config_id(self):
44 return make_game_config_id(self.slug)
45
46 @classmethod
47 def is_importable(cls, appmanifest):
48 """Return whether a Steam game should be imported"""
49 if not appmanifest.is_installed():
50 return False
51 if appmanifest.steamid in cls.excluded_appids:
52 return False
53 if re.match(r"^Proton \d*", appmanifest.name):
54 return False
55 return True
56
57 def install(self, updated_info=None):
58 """Add an installed game to the library
59
60 Params:
61             updated_info (dict): Optional dictionary containing existing data not to overwrite
62 """
63 if updated_info:
64 name = updated_info["name"]
65 slug = updated_info["slug"]
66 else:
67 name = self.name
68 slug = self.slug
69 self.game_id = pga.add_or_update(
70 id=self.game_id,
71 name=name,
72 runner=self.runner,
73 slug=slug,
74 steamid=int(self.appid),
75 installed=1,
76 configpath=self.config_id,
77 installer_slug=self.installer_slug,
78 )
79 self.create_config()
80 return self.game_id
81
82 def create_config(self):
83 """Create the game configuration for a Steam game"""
84 game_config = LutrisConfig(
85 runner_slug=self.runner, game_config_id=self.config_id
86 )
87 game_config.raw_game_config.update({"appid": self.appid})
88 game_config.save()
89
90
91 class SteamSyncer:
92 platform = "linux"
93
94 def __init__(self):
95 self._lutris_games = None
96 self._lutris_steamids = None
97
98 @property
99 def runner(self):
100 return "steam" if self.platform == "linux" else "winesteam"
101
102 @property
103 def lutris_games(self):
104 if not self._lutris_games:
105 self._lutris_games = pga.get_games_where(
106 steamid__isnull=False, steamid__not=""
107 )
108 return self._lutris_games
109
110 @property
111 def lutris_steamids(self):
112 if not self._lutris_steamids:
113 self._lutris_steamids = {str(game["steamid"]) for game in self.lutris_games}
114 return self._lutris_steamids
115
116 def load(self, force_reload=False):
117 """Return importable Steam games"""
118 games = []
119 steamapps_paths = get_steamapps_paths()
120 for steamapps_path in steamapps_paths[self.platform]:
121 for appmanifest_file in get_appmanifests(steamapps_path):
122 app_manifest = AppManifest(
123 os.path.join(steamapps_path, appmanifest_file)
124 )
125 if SteamGame.is_importable(app_manifest):
126 games.append(SteamGame.new_from_steam_game(app_manifest))
127 return games
128
129 def get_pga_game(self, game):
130 """Return a PGA game if one is found"""
131 for pga_game in self.lutris_games:
132 if (
133 str(pga_game["steamid"]) == game.appid
134 and (pga_game["runner"] == self.runner or not pga_game["runner"])
135 and not pga_game["installed"]
136 ):
137 return pga_game
138
139 def sync(self, games, full=False):
140 """Syncs Steam games to Lutris"""
141 available_ids = set() # Set of Steam appids seen while browsing AppManifests
142 added_games = []
143 for game in games:
144 steamid = game.appid
145 available_ids.add(steamid)
146 pga_game = self.get_pga_game(game)
147
148 if pga_game:
149 if (
150 steamid in self.lutris_steamids
151 and pga_game["installed"] != 1
152 and pga_game["installed"]
153 ):
154 added_games.append(game.install())
155
156 if steamid not in self.lutris_steamids:
157 added_games.append(game.install())
158 else:
159 if pga_game:
160 added_games.append(game.install(pga_game))
161
162 if not full:
163 return added_games, games
164
165 removed_games = []
166 unavailable_ids = self.lutris_steamids.difference(available_ids)
167 for steamid in unavailable_ids:
168 for pga_game in self.lutris_games:
169 if (
170 str(pga_game["steamid"]) == steamid
171 and pga_game["installed"]
172 and pga_game["runner"] == self.runner
173 ):
174 game = SteamGame.new_from_lutris_id(pga_game["id"])
175 game.uninstall()
176 removed_games.append(pga_game["id"])
177 return (added_games, removed_games)
178
179
180 SYNCER = SteamSyncer
181
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lutris/services/steam.py b/lutris/services/steam.py
--- a/lutris/services/steam.py
+++ b/lutris/services/steam.py
@@ -20,6 +20,7 @@
installer_slug = "steam"
excluded_appids = [
"228980", # Steamworks Common Redistributables
+ "1070560", # Steam Linux Runtime
]
@classmethod
| {"golden_diff": "diff --git a/lutris/services/steam.py b/lutris/services/steam.py\n--- a/lutris/services/steam.py\n+++ b/lutris/services/steam.py\n@@ -20,6 +20,7 @@\n installer_slug = \"steam\"\n excluded_appids = [\n \"228980\", # Steamworks Common Redistributables\n+ \"1070560\", # Steam Linux Runtime\n ]\n \n @classmethod\n", "issue": "Don't show Steam Linux Runtime when importing games\nLink to the tool on steamdb: https://steamdb.info/app/1070560/\n", "before_files": [{"content": "\"\"\"Steam service\"\"\"\nimport os\nimport re\n\nfrom lutris import pga\nfrom lutris.config import make_game_config_id, LutrisConfig\nfrom lutris.util.steam.appmanifest import AppManifest, get_appmanifests\nfrom lutris.util.steam.config import get_steamapps_paths\nfrom lutris.services.service_game import ServiceGame\n\nNAME = \"Steam\"\nICON = \"steam\"\nONLINE = False\n\n\nclass SteamGame(ServiceGame):\n \"\"\"ServiceGame for Steam games\"\"\"\n\n store = \"steam\"\n installer_slug = \"steam\"\n excluded_appids = [\n \"228980\", # Steamworks Common Redistributables\n ]\n\n @classmethod\n def new_from_steam_game(cls, appmanifest, game_id=None):\n \"\"\"Return a Steam game instance from an AppManifest\"\"\"\n steam_game = SteamGame()\n steam_game.appid = str(appmanifest.steamid)\n steam_game.game_id = game_id\n steam_game.name = appmanifest.name\n steam_game.slug = appmanifest.slug\n steam_game.runner = appmanifest.get_runner_name()\n return steam_game\n\n @classmethod\n def new_from_lutris_id(cls, game_id):\n steam_game = SteamGame()\n steam_game.game_id = game_id\n return steam_game\n\n @property\n def config_id(self):\n return make_game_config_id(self.slug)\n\n @classmethod\n def is_importable(cls, appmanifest):\n \"\"\"Return whether a Steam game should be imported\"\"\"\n if not appmanifest.is_installed():\n return False\n if appmanifest.steamid in cls.excluded_appids:\n return False\n if re.match(r\"^Proton \\d*\", appmanifest.name):\n return False\n return True\n\n def install(self, updated_info=None):\n \"\"\"Add an installed game to the library\n\n Params:\n updated_info (dict): Optional dictonary containing existing data not to overwrite\n \"\"\"\n if updated_info:\n name = updated_info[\"name\"]\n slug = updated_info[\"slug\"]\n else:\n name = self.name\n slug = self.slug\n self.game_id = pga.add_or_update(\n id=self.game_id,\n name=name,\n runner=self.runner,\n slug=slug,\n steamid=int(self.appid),\n installed=1,\n configpath=self.config_id,\n installer_slug=self.installer_slug,\n )\n self.create_config()\n return self.game_id\n\n def create_config(self):\n \"\"\"Create the game configuration for a Steam game\"\"\"\n game_config = LutrisConfig(\n runner_slug=self.runner, game_config_id=self.config_id\n )\n game_config.raw_game_config.update({\"appid\": self.appid})\n game_config.save()\n\n\nclass SteamSyncer:\n platform = \"linux\"\n\n def __init__(self):\n self._lutris_games = None\n self._lutris_steamids = None\n\n @property\n def runner(self):\n return \"steam\" if self.platform == \"linux\" else \"winesteam\"\n\n @property\n def lutris_games(self):\n if not self._lutris_games:\n self._lutris_games = pga.get_games_where(\n steamid__isnull=False, steamid__not=\"\"\n )\n return self._lutris_games\n\n @property\n def lutris_steamids(self):\n if not self._lutris_steamids:\n self._lutris_steamids = {str(game[\"steamid\"]) for game in self.lutris_games}\n return self._lutris_steamids\n\n def load(self, force_reload=False):\n \"\"\"Return importable Steam games\"\"\"\n games = []\n steamapps_paths 
= get_steamapps_paths()\n for steamapps_path in steamapps_paths[self.platform]:\n for appmanifest_file in get_appmanifests(steamapps_path):\n app_manifest = AppManifest(\n os.path.join(steamapps_path, appmanifest_file)\n )\n if SteamGame.is_importable(app_manifest):\n games.append(SteamGame.new_from_steam_game(app_manifest))\n return games\n\n def get_pga_game(self, game):\n \"\"\"Return a PGA game if one is found\"\"\"\n for pga_game in self.lutris_games:\n if (\n str(pga_game[\"steamid\"]) == game.appid\n and (pga_game[\"runner\"] == self.runner or not pga_game[\"runner\"])\n and not pga_game[\"installed\"]\n ):\n return pga_game\n\n def sync(self, games, full=False):\n \"\"\"Syncs Steam games to Lutris\"\"\"\n available_ids = set() # Set of Steam appids seen while browsing AppManifests\n added_games = []\n for game in games:\n steamid = game.appid\n available_ids.add(steamid)\n pga_game = self.get_pga_game(game)\n\n if pga_game:\n if (\n steamid in self.lutris_steamids\n and pga_game[\"installed\"] != 1\n and pga_game[\"installed\"]\n ):\n added_games.append(game.install())\n\n if steamid not in self.lutris_steamids:\n added_games.append(game.install())\n else:\n if pga_game:\n added_games.append(game.install(pga_game))\n\n if not full:\n return added_games, games\n\n removed_games = []\n unavailable_ids = self.lutris_steamids.difference(available_ids)\n for steamid in unavailable_ids:\n for pga_game in self.lutris_games:\n if (\n str(pga_game[\"steamid\"]) == steamid\n and pga_game[\"installed\"]\n and pga_game[\"runner\"] == self.runner\n ):\n game = SteamGame.new_from_lutris_id(pga_game[\"id\"])\n game.uninstall()\n removed_games.append(pga_game[\"id\"])\n return (added_games, removed_games)\n\n\nSYNCER = SteamSyncer\n", "path": "lutris/services/steam.py"}], "after_files": [{"content": "\"\"\"Steam service\"\"\"\nimport os\nimport re\n\nfrom lutris import pga\nfrom lutris.config import make_game_config_id, LutrisConfig\nfrom lutris.util.steam.appmanifest import AppManifest, get_appmanifests\nfrom lutris.util.steam.config import get_steamapps_paths\nfrom lutris.services.service_game import ServiceGame\n\nNAME = \"Steam\"\nICON = \"steam\"\nONLINE = False\n\n\nclass SteamGame(ServiceGame):\n \"\"\"ServiceGame for Steam games\"\"\"\n\n store = \"steam\"\n installer_slug = \"steam\"\n excluded_appids = [\n \"228980\", # Steamworks Common Redistributables\n \"1070560\", # Steam Linux Runtime\n ]\n\n @classmethod\n def new_from_steam_game(cls, appmanifest, game_id=None):\n \"\"\"Return a Steam game instance from an AppManifest\"\"\"\n steam_game = SteamGame()\n steam_game.appid = str(appmanifest.steamid)\n steam_game.game_id = game_id\n steam_game.name = appmanifest.name\n steam_game.slug = appmanifest.slug\n steam_game.runner = appmanifest.get_runner_name()\n return steam_game\n\n @classmethod\n def new_from_lutris_id(cls, game_id):\n steam_game = SteamGame()\n steam_game.game_id = game_id\n return steam_game\n\n @property\n def config_id(self):\n return make_game_config_id(self.slug)\n\n @classmethod\n def is_importable(cls, appmanifest):\n \"\"\"Return whether a Steam game should be imported\"\"\"\n if not appmanifest.is_installed():\n return False\n if appmanifest.steamid in cls.excluded_appids:\n return False\n if re.match(r\"^Proton \\d*\", appmanifest.name):\n return False\n return True\n\n def install(self, updated_info=None):\n \"\"\"Add an installed game to the library\n\n Params:\n updated_info (dict): Optional dictonary containing existing data not to overwrite\n \"\"\"\n if 
updated_info:\n name = updated_info[\"name\"]\n slug = updated_info[\"slug\"]\n else:\n name = self.name\n slug = self.slug\n self.game_id = pga.add_or_update(\n id=self.game_id,\n name=name,\n runner=self.runner,\n slug=slug,\n steamid=int(self.appid),\n installed=1,\n configpath=self.config_id,\n installer_slug=self.installer_slug,\n )\n self.create_config()\n return self.game_id\n\n def create_config(self):\n \"\"\"Create the game configuration for a Steam game\"\"\"\n game_config = LutrisConfig(\n runner_slug=self.runner, game_config_id=self.config_id\n )\n game_config.raw_game_config.update({\"appid\": self.appid})\n game_config.save()\n\n\nclass SteamSyncer:\n platform = \"linux\"\n\n def __init__(self):\n self._lutris_games = None\n self._lutris_steamids = None\n\n @property\n def runner(self):\n return \"steam\" if self.platform == \"linux\" else \"winesteam\"\n\n @property\n def lutris_games(self):\n if not self._lutris_games:\n self._lutris_games = pga.get_games_where(\n steamid__isnull=False, steamid__not=\"\"\n )\n return self._lutris_games\n\n @property\n def lutris_steamids(self):\n if not self._lutris_steamids:\n self._lutris_steamids = {str(game[\"steamid\"]) for game in self.lutris_games}\n return self._lutris_steamids\n\n def load(self, force_reload=False):\n \"\"\"Return importable Steam games\"\"\"\n games = []\n steamapps_paths = get_steamapps_paths()\n for steamapps_path in steamapps_paths[self.platform]:\n for appmanifest_file in get_appmanifests(steamapps_path):\n app_manifest = AppManifest(\n os.path.join(steamapps_path, appmanifest_file)\n )\n if SteamGame.is_importable(app_manifest):\n games.append(SteamGame.new_from_steam_game(app_manifest))\n return games\n\n def get_pga_game(self, game):\n \"\"\"Return a PGA game if one is found\"\"\"\n for pga_game in self.lutris_games:\n if (\n str(pga_game[\"steamid\"]) == game.appid\n and (pga_game[\"runner\"] == self.runner or not pga_game[\"runner\"])\n and not pga_game[\"installed\"]\n ):\n return pga_game\n\n def sync(self, games, full=False):\n \"\"\"Syncs Steam games to Lutris\"\"\"\n available_ids = set() # Set of Steam appids seen while browsing AppManifests\n added_games = []\n for game in games:\n steamid = game.appid\n available_ids.add(steamid)\n pga_game = self.get_pga_game(game)\n\n if pga_game:\n if (\n steamid in self.lutris_steamids\n and pga_game[\"installed\"] != 1\n and pga_game[\"installed\"]\n ):\n added_games.append(game.install())\n\n if steamid not in self.lutris_steamids:\n added_games.append(game.install())\n else:\n if pga_game:\n added_games.append(game.install(pga_game))\n\n if not full:\n return added_games, games\n\n removed_games = []\n unavailable_ids = self.lutris_steamids.difference(available_ids)\n for steamid in unavailable_ids:\n for pga_game in self.lutris_games:\n if (\n str(pga_game[\"steamid\"]) == steamid\n and pga_game[\"installed\"]\n and pga_game[\"runner\"] == self.runner\n ):\n game = SteamGame.new_from_lutris_id(pga_game[\"id\"])\n game.uninstall()\n removed_games.append(pga_game[\"id\"])\n return (added_games, removed_games)\n\n\nSYNCER = SteamSyncer\n", "path": "lutris/services/steam.py"}]} | 2,020 | 107 |
gh_patches_debug_27088 | rasdani/github-patches | git_diff | coala__coala-5000 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change default_coafile naming convention.
Should we change the name of `default_coafile` in `coalib` to `system_coafile`?
It will avoid confusion due to the following lines in `Constants.py`
```
system_coafile = os.path.join(coalib_root, 'default_coafile')
user_coafile = os.path.join(os.path.expanduser('~'), '.coarc')
default_coafile = '.coafile'
```
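The collision is easiest to see at the REPL: the *file* named `default_coafile` is what `system_coafile` points at, while the *variable* `default_coafile` is the per-project `.coafile` (a quick sketch, assuming coala is importable):

```python
from coalib.misc import Constants

# Two unrelated things currently share the name "default_coafile":
# the bundled system-wide config file and the per-project filename.
print(Constants.system_coafile)   # .../coalib/default_coafile  <- a file named "default_coafile"
print(Constants.user_coafile)     # ~/.coarc
print(Constants.default_coafile)  # .coafile                    <- the variable "default_coafile"
```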
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `coalib/misc/Constants.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import appdirs
4 import os
5 import re
6
7 from coalib import VERSION
8
9
10 THIS_IS_A_BUG = ('This is a bug. We are sorry for the inconvenience. '
11 'Please contact the developers for assistance.')
12
13 CRASH_MESSAGE = ('An unknown error occurred. This is a bug. We are '
14 'sorry for the inconvenience. Please contact the '
15 'developers for assistance. During execution of '
16 'coala an exception was raised. This should never '
17 'happen. When asked for, the following information '
18 'may help investigating:')
19
20 VERSION_CONFLICT_MESSAGE = ('There is a conflict in the version of a '
21 'dependency you have installed and the '
22 'requirements of coala. This may be resolved by '
23 'creating a separate virtual environment for '
24 'coala or running `pip3 install "%s"`. Be aware '
25 'that the latter solution might break other '
26 'python packages that depend on the currently '
27 'installed version.')
28
29 OBJ_NOT_ACCESSIBLE = '{} is not accessible and will be ignored!'
30
31 TRUE_STRINGS = ['1',
32 'on',
33 'okay',
34 'ok',
35 'okey-dokey',
36 'y',
37 'yes',
38 'yeah',
39 'yea',
40 'ya',
41 'ye',
42 'yessir',
43 'sure',
44 'true',
45 'tru',
46 'uh-huh',
47 'definitely',
48 'yup',
49 'yep',
50 'right',
51 'aye',
52 'alright',
53 'alrighty',
54 'hell yeah',
55 'affirmative',
56 'certainly',
57 'definitely',
58 'absolutely',
59 'roger',
60 'righto',
61 'ja',
62 'da',
63 'si',
64 'oui',
65 'amen',
66 'totally',
67 '10-4',
68 'positive']
69
70 FALSE_STRINGS = ['0',
71 'off',
72 'n',
73 'no',
74 'nix',
75 'nope',
76 'nop',
77 'nah',
78 'nay',
79 'false',
80 'uh-uh',
81 'wrong',
82 'none',
83 'nay',
84 'hell no',
85 'fat chance',
86 'not a chance in hell',
87 'not in a million years',
88 'out of the question',
89 'no siree',
90 'no way',
91 'nein',
92 'njet',
93 'nee',
94 'non',
95 'hakuna',
96 'negative']
97
98 # This string contains many unicode characters to challenge tests.
99 COMPLEX_TEST_STRING = ('4 r34l ch4ll3n63: 123 ÄÖü ABc @€¥ §&% {[( ←↓→↑ '
100 'ĦŊħ ß°^ \\\n\u2192')
101
102 # Path to the coalib directory
103 coalib_root = os.path.join(os.path.dirname(__file__),
104 os.path.pardir)
105
106 # Path to the language definition files
107 language_definitions = os.path.join(coalib_root,
108 'bearlib',
109 'languages',
110 'definitions')
111
112 system_coafile = os.path.join(coalib_root, 'default_coafile')
113
114 user_coafile = os.path.join(os.path.expanduser('~'), '.coarc')
115
116 default_coafile = '.coafile'
117
118 USER_DATA_DIR = appdirs.user_data_dir('coala', version=VERSION)
119
120 GLOBBING_SPECIAL_CHARS = '()[]|?*'
121
122 URL_REGEX = re.compile(
123 r'^(?:(?:http|ftp)[s]?://)?' # scheme
124 r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+' # domain name
125 r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'
126 r'localhost|' # OR localhost
127 r'(?:\d{1,3}\.){3}\d{1,3})' # OR an ip
128 r'(?::\d+)?' # optional port number
129 r'(?:/?|[/?]\S+)$', # path
130 re.IGNORECASE)
131
```
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import datetime
4 import locale
5 import platform
6 import sys
7 from os import getenv
8 from subprocess import call
9
10 import setuptools.command.build_py
11 from setuptools import find_packages, setup
12 from setuptools.command.test import test as TestCommand
13
14 from coalib import VERSION, assert_supported_version, get_version
15 from coalib.misc.BuildManPage import BuildManPage
16
17 try:
18 lc = locale.getlocale()
19 pf = platform.system()
20 if pf != 'Windows' and lc == (None, None):
21 locale.setlocale(locale.LC_ALL, 'C.UTF-8')
22 except (ValueError, UnicodeError):
23 locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
24
25
26 assert_supported_version()
27
28
29 class BuildPyCommand(setuptools.command.build_py.build_py):
30
31 def run(self):
32 if platform.system() != 'Windows':
33 self.run_command('build_manpage')
34 setuptools.command.build_py.build_py.run(self)
35
36
37 class PyTestCommand(TestCommand):
38
39 def run_tests(self):
40 # import here, cause outside the eggs aren't loaded
41 import pytest
42 errno = pytest.main([])
43 sys.exit(errno)
44
45
46 class BuildDocsCommand(setuptools.command.build_py.build_py):
47 apidoc_command = (
48 'sphinx-apidoc', '-f', '-o', 'docs', '--no-toc', 'coalib'
49 )
50 doc_command = ('make', '-C', 'docs', 'html', 'SPHINXOPTS=-W')
51
52 def run(self):
53 errOne = call(self.apidoc_command)
54 errTwo = call(self.doc_command)
55 sys.exit(errOne or errTwo)
56
57
58 # Generate API documentation only if we are running on readthedocs.io
59 on_rtd = getenv('READTHEDOCS', None) is not None
60 if on_rtd:
61 call(BuildDocsCommand.apidoc_command)
62 if 'dev' in VERSION:
63 current_version = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
64 call(['python3', '.misc/adjust_version_number.py', 'coalib/VERSION',
65 '-b {}'.format(current_version)])
66 VERSION = get_version()
67
68 with open('requirements.txt') as requirements:
69 required = requirements.read().splitlines()
70
71 with open('test-requirements.txt') as requirements:
72 test_required = requirements.read().splitlines()
73
74 with open('README.rst') as readme:
75 long_description = readme.read()
76
77
78 if __name__ == '__main__':
79 if platform.system() != 'Windows':
80 data_files = [('.', ['coala.1'])]
81 else:
82 data_files = [('.', [])]
83
84 setup(name='coala',
85 version=VERSION,
86 description='Linting and Fixing Code for All Languages',
87 author='The coala developers',
88 author_email='[email protected]',
89 maintainer='Lasse Schuirmann, Fabian Neuschmidt, Mischa Kr\xfcger'
90 if not on_rtd else 'L.S., F.N., M.K.',
91 maintainer_email=('[email protected], '
92 '[email protected], '
93 '[email protected]'),
94 url='http://coala.io/',
95 platforms='any',
96 packages=find_packages(exclude=['build.*', 'tests', 'tests.*']),
97 install_requires=required,
98 tests_require=test_required,
99 package_data={'coalib': ['default_coafile', 'VERSION',
100 'bearlib/languages/documentation/*.coalang']
101 },
102 license='AGPL-3.0',
103 data_files=data_files,
104 long_description=long_description,
105 entry_points={
106 'console_scripts': [
107 'coala = coalib.coala:main',
108 'coala-ci = coalib.coala_ci:main',
109 'coala-json = coalib.coala_json:main',
110 'coala-format = coalib.coala_format:main',
111 'coala-delete-orig = coalib.coala_delete_orig:main']},
112 # from http://pypi.python.org/pypi?%3Aaction=list_classifiers
113 classifiers=[
114 'Development Status :: 4 - Beta',
115
116 'Environment :: Console',
117 'Environment :: MacOS X',
118 'Environment :: Win32 (MS Windows)',
119 'Environment :: X11 Applications :: Gnome',
120
121 'Intended Audience :: Science/Research',
122 'Intended Audience :: Developers',
123
124 'License :: OSI Approved :: GNU Affero General Public License '
125 'v3 or later (AGPLv3+)',
126
127 'Operating System :: OS Independent',
128
129 'Programming Language :: Python :: Implementation :: CPython',
130 'Programming Language :: Python :: 3.4',
131 'Programming Language :: Python :: 3.5',
132 'Programming Language :: Python :: 3 :: Only',
133
134 'Topic :: Scientific/Engineering :: Information Analysis',
135 'Topic :: Software Development :: Quality Assurance',
136 'Topic :: Text Processing :: Linguistic'],
137 cmdclass={'build_manpage': BuildManPage,
138 'build_py': BuildPyCommand,
139 'docs': BuildDocsCommand,
140 'test': PyTestCommand})
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/coalib/misc/Constants.py b/coalib/misc/Constants.py
--- a/coalib/misc/Constants.py
+++ b/coalib/misc/Constants.py
@@ -3,6 +3,7 @@
import appdirs
import os
import re
+import logging
from coalib import VERSION
@@ -109,7 +110,17 @@
'languages',
'definitions')
-system_coafile = os.path.join(coalib_root, 'default_coafile')
+
+def get_system_coafile(coalib_root):
+ if os.path.isfile(os.path.join(coalib_root, 'system_coafile')):
+ return os.path.join(coalib_root, 'system_coafile')
+ else:
+ logging.warning('Filename deafult_coafile has been deprecated. '
+ 'Please use system_coafile instead.')
+ return os.path.join(coalib_root, 'default_coafile')
+
+
+system_coafile = get_system_coafile(coalib_root)
user_coafile = os.path.join(os.path.expanduser('~'), '.coarc')
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -96,7 +96,7 @@
packages=find_packages(exclude=['build.*', 'tests', 'tests.*']),
install_requires=required,
tests_require=test_required,
- package_data={'coalib': ['default_coafile', 'VERSION',
+ package_data={'coalib': ['system_coafile', 'VERSION',
'bearlib/languages/documentation/*.coalang']
},
license='AGPL-3.0',
| {"golden_diff": "diff --git a/coalib/misc/Constants.py b/coalib/misc/Constants.py\n--- a/coalib/misc/Constants.py\n+++ b/coalib/misc/Constants.py\n@@ -3,6 +3,7 @@\n import appdirs\n import os\n import re\n+import logging\n \n from coalib import VERSION\n \n@@ -109,7 +110,17 @@\n 'languages',\n 'definitions')\n \n-system_coafile = os.path.join(coalib_root, 'default_coafile')\n+\n+def get_system_coafile(coalib_root):\n+ if os.path.isfile(os.path.join(coalib_root, 'system_coafile')):\n+ return os.path.join(coalib_root, 'system_coafile')\n+ else:\n+ logging.warning('Filename deafult_coafile has been deprecated. '\n+ 'Please use system_coafile instead.')\n+ return os.path.join(coalib_root, 'default_coafile')\n+\n+\n+system_coafile = get_system_coafile(coalib_root)\n \n user_coafile = os.path.join(os.path.expanduser('~'), '.coarc')\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -96,7 +96,7 @@\n packages=find_packages(exclude=['build.*', 'tests', 'tests.*']),\n install_requires=required,\n tests_require=test_required,\n- package_data={'coalib': ['default_coafile', 'VERSION',\n+ package_data={'coalib': ['system_coafile', 'VERSION',\n 'bearlib/languages/documentation/*.coalang']\n },\n license='AGPL-3.0',\n", "issue": "Change default_coafile naming convention.\nShould we change the name of `default_coafile` in `coalib` to `system_coafile`?\r\nIt will avoid confusion due to the following lines in `Constants.py`\r\n```\r\nsystem_coafile = os.path.join(coalib_root, 'default_coafile')\r\n\r\nuser_coafile = os.path.join(os.path.expanduser('~'), '.coarc')\r\n\r\ndefault_coafile = '.coafile'\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport appdirs\nimport os\nimport re\n\nfrom coalib import VERSION\n\n\nTHIS_IS_A_BUG = ('This is a bug. We are sorry for the inconvenience. '\n 'Please contact the developers for assistance.')\n\nCRASH_MESSAGE = ('An unknown error occurred. This is a bug. We are '\n 'sorry for the inconvenience. Please contact the '\n 'developers for assistance. During execution of '\n 'coala an exception was raised. This should never '\n 'happen. When asked for, the following information '\n 'may help investigating:')\n\nVERSION_CONFLICT_MESSAGE = ('There is a conflict in the version of a '\n 'dependency you have installed and the '\n 'requirements of coala. This may be resolved by '\n 'creating a separate virtual environment for '\n 'coala or running `pip3 install \"%s\"`. 
Be aware '\n 'that the latter solution might break other '\n 'python packages that depend on the currently '\n 'installed version.')\n\nOBJ_NOT_ACCESSIBLE = '{} is not accessible and will be ignored!'\n\nTRUE_STRINGS = ['1',\n 'on',\n 'okay',\n 'ok',\n 'okey-dokey',\n 'y',\n 'yes',\n 'yeah',\n 'yea',\n 'ya',\n 'ye',\n 'yessir',\n 'sure',\n 'true',\n 'tru',\n 'uh-huh',\n 'definitely',\n 'yup',\n 'yep',\n 'right',\n 'aye',\n 'alright',\n 'alrighty',\n 'hell yeah',\n 'affirmative',\n 'certainly',\n 'definitely',\n 'absolutely',\n 'roger',\n 'righto',\n 'ja',\n 'da',\n 'si',\n 'oui',\n 'amen',\n 'totally',\n '10-4',\n 'positive']\n\nFALSE_STRINGS = ['0',\n 'off',\n 'n',\n 'no',\n 'nix',\n 'nope',\n 'nop',\n 'nah',\n 'nay',\n 'false',\n 'uh-uh',\n 'wrong',\n 'none',\n 'nay',\n 'hell no',\n 'fat chance',\n 'not a chance in hell',\n 'not in a million years',\n 'out of the question',\n 'no siree',\n 'no way',\n 'nein',\n 'njet',\n 'nee',\n 'non',\n 'hakuna',\n 'negative']\n\n# This string contains many unicode characters to challenge tests.\nCOMPLEX_TEST_STRING = ('4 r34l ch4ll3n63: 123 \u00c4\u00d6\u00fc ABc @\u20ac\u00a5 \u00a7&% {[( \u2190\u2193\u2192\u2191 '\n '\u0126\u014a\u0127 \u00df\u00b0^ \\\\\\n\\u2192')\n\n# Path to the coalib directory\ncoalib_root = os.path.join(os.path.dirname(__file__),\n os.path.pardir)\n\n# Path to the language definition files\nlanguage_definitions = os.path.join(coalib_root,\n 'bearlib',\n 'languages',\n 'definitions')\n\nsystem_coafile = os.path.join(coalib_root, 'default_coafile')\n\nuser_coafile = os.path.join(os.path.expanduser('~'), '.coarc')\n\ndefault_coafile = '.coafile'\n\nUSER_DATA_DIR = appdirs.user_data_dir('coala', version=VERSION)\n\nGLOBBING_SPECIAL_CHARS = '()[]|?*'\n\nURL_REGEX = re.compile(\n r'^(?:(?:http|ftp)[s]?://)?' # scheme\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+' # domain name\n r'(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|'\n r'localhost|' # OR localhost\n r'(?:\\d{1,3}\\.){3}\\d{1,3})' # OR an ip\n r'(?::\\d+)?' 
# optional port number\n r'(?:/?|[/?]\\S+)$', # path\n re.IGNORECASE)\n", "path": "coalib/misc/Constants.py"}, {"content": "#!/usr/bin/env python3\n\nimport datetime\nimport locale\nimport platform\nimport sys\nfrom os import getenv\nfrom subprocess import call\n\nimport setuptools.command.build_py\nfrom setuptools import find_packages, setup\nfrom setuptools.command.test import test as TestCommand\n\nfrom coalib import VERSION, assert_supported_version, get_version\nfrom coalib.misc.BuildManPage import BuildManPage\n\ntry:\n lc = locale.getlocale()\n pf = platform.system()\n if pf != 'Windows' and lc == (None, None):\n locale.setlocale(locale.LC_ALL, 'C.UTF-8')\nexcept (ValueError, UnicodeError):\n locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')\n\n\nassert_supported_version()\n\n\nclass BuildPyCommand(setuptools.command.build_py.build_py):\n\n def run(self):\n if platform.system() != 'Windows':\n self.run_command('build_manpage')\n setuptools.command.build_py.build_py.run(self)\n\n\nclass PyTestCommand(TestCommand):\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main([])\n sys.exit(errno)\n\n\nclass BuildDocsCommand(setuptools.command.build_py.build_py):\n apidoc_command = (\n 'sphinx-apidoc', '-f', '-o', 'docs', '--no-toc', 'coalib'\n )\n doc_command = ('make', '-C', 'docs', 'html', 'SPHINXOPTS=-W')\n\n def run(self):\n errOne = call(self.apidoc_command)\n errTwo = call(self.doc_command)\n sys.exit(errOne or errTwo)\n\n\n# Generate API documentation only if we are running on readthedocs.io\non_rtd = getenv('READTHEDOCS', None) is not None\nif on_rtd:\n call(BuildDocsCommand.apidoc_command)\n if 'dev' in VERSION:\n current_version = datetime.datetime.now().strftime('%Y%m%d%H%M%S')\n call(['python3', '.misc/adjust_version_number.py', 'coalib/VERSION',\n '-b {}'.format(current_version)])\n VERSION = get_version()\n\nwith open('requirements.txt') as requirements:\n required = requirements.read().splitlines()\n\nwith open('test-requirements.txt') as requirements:\n test_required = requirements.read().splitlines()\n\nwith open('README.rst') as readme:\n long_description = readme.read()\n\n\nif __name__ == '__main__':\n if platform.system() != 'Windows':\n data_files = [('.', ['coala.1'])]\n else:\n data_files = [('.', [])]\n\n setup(name='coala',\n version=VERSION,\n description='Linting and Fixing Code for All Languages',\n author='The coala developers',\n author_email='[email protected]',\n maintainer='Lasse Schuirmann, Fabian Neuschmidt, Mischa Kr\\xfcger'\n if not on_rtd else 'L.S., F.N., M.K.',\n maintainer_email=('[email protected], '\n '[email protected], '\n '[email protected]'),\n url='http://coala.io/',\n platforms='any',\n packages=find_packages(exclude=['build.*', 'tests', 'tests.*']),\n install_requires=required,\n tests_require=test_required,\n package_data={'coalib': ['default_coafile', 'VERSION',\n 'bearlib/languages/documentation/*.coalang']\n },\n license='AGPL-3.0',\n data_files=data_files,\n long_description=long_description,\n entry_points={\n 'console_scripts': [\n 'coala = coalib.coala:main',\n 'coala-ci = coalib.coala_ci:main',\n 'coala-json = coalib.coala_json:main',\n 'coala-format = coalib.coala_format:main',\n 'coala-delete-orig = coalib.coala_delete_orig:main']},\n # from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n 'Development Status :: 4 - Beta',\n\n 'Environment :: Console',\n 'Environment :: MacOS X',\n 'Environment :: Win32 (MS Windows)',\n 'Environment :: X11 
Applications :: Gnome',\n\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n\n 'License :: OSI Approved :: GNU Affero General Public License '\n 'v3 or later (AGPLv3+)',\n\n 'Operating System :: OS Independent',\n\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3 :: Only',\n\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Text Processing :: Linguistic'],\n cmdclass={'build_manpage': BuildManPage,\n 'build_py': BuildPyCommand,\n 'docs': BuildDocsCommand,\n 'test': PyTestCommand})\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport appdirs\nimport os\nimport re\nimport logging\n\nfrom coalib import VERSION\n\n\nTHIS_IS_A_BUG = ('This is a bug. We are sorry for the inconvenience. '\n 'Please contact the developers for assistance.')\n\nCRASH_MESSAGE = ('An unknown error occurred. This is a bug. We are '\n 'sorry for the inconvenience. Please contact the '\n 'developers for assistance. During execution of '\n 'coala an exception was raised. This should never '\n 'happen. When asked for, the following information '\n 'may help investigating:')\n\nVERSION_CONFLICT_MESSAGE = ('There is a conflict in the version of a '\n 'dependency you have installed and the '\n 'requirements of coala. This may be resolved by '\n 'creating a separate virtual environment for '\n 'coala or running `pip3 install \"%s\"`. Be aware '\n 'that the latter solution might break other '\n 'python packages that depend on the currently '\n 'installed version.')\n\nOBJ_NOT_ACCESSIBLE = '{} is not accessible and will be ignored!'\n\nTRUE_STRINGS = ['1',\n 'on',\n 'okay',\n 'ok',\n 'okey-dokey',\n 'y',\n 'yes',\n 'yeah',\n 'yea',\n 'ya',\n 'ye',\n 'yessir',\n 'sure',\n 'true',\n 'tru',\n 'uh-huh',\n 'definitely',\n 'yup',\n 'yep',\n 'right',\n 'aye',\n 'alright',\n 'alrighty',\n 'hell yeah',\n 'affirmative',\n 'certainly',\n 'definitely',\n 'absolutely',\n 'roger',\n 'righto',\n 'ja',\n 'da',\n 'si',\n 'oui',\n 'amen',\n 'totally',\n '10-4',\n 'positive']\n\nFALSE_STRINGS = ['0',\n 'off',\n 'n',\n 'no',\n 'nix',\n 'nope',\n 'nop',\n 'nah',\n 'nay',\n 'false',\n 'uh-uh',\n 'wrong',\n 'none',\n 'nay',\n 'hell no',\n 'fat chance',\n 'not a chance in hell',\n 'not in a million years',\n 'out of the question',\n 'no siree',\n 'no way',\n 'nein',\n 'njet',\n 'nee',\n 'non',\n 'hakuna',\n 'negative']\n\n# This string contains many unicode characters to challenge tests.\nCOMPLEX_TEST_STRING = ('4 r34l ch4ll3n63: 123 \u00c4\u00d6\u00fc ABc @\u20ac\u00a5 \u00a7&% {[( \u2190\u2193\u2192\u2191 '\n '\u0126\u014a\u0127 \u00df\u00b0^ \\\\\\n\\u2192')\n\n# Path to the coalib directory\ncoalib_root = os.path.join(os.path.dirname(__file__),\n os.path.pardir)\n\n# Path to the language definition files\nlanguage_definitions = os.path.join(coalib_root,\n 'bearlib',\n 'languages',\n 'definitions')\n\n\ndef get_system_coafile(coalib_root):\n if os.path.isfile(os.path.join(coalib_root, 'system_coafile')):\n return os.path.join(coalib_root, 'system_coafile')\n else:\n logging.warning('Filename deafult_coafile has been deprecated. 
'\n 'Please use system_coafile instead.')\n return os.path.join(coalib_root, 'default_coafile')\n\n\nsystem_coafile = get_system_coafile(coalib_root)\n\nuser_coafile = os.path.join(os.path.expanduser('~'), '.coarc')\n\ndefault_coafile = '.coafile'\n\nUSER_DATA_DIR = appdirs.user_data_dir('coala', version=VERSION)\n\nGLOBBING_SPECIAL_CHARS = '()[]|?*'\n\nURL_REGEX = re.compile(\n r'^(?:(?:http|ftp)[s]?://)?' # scheme\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+' # domain name\n r'(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|'\n r'localhost|' # OR localhost\n r'(?:\\d{1,3}\\.){3}\\d{1,3})' # OR an ip\n r'(?::\\d+)?' # optional port number\n r'(?:/?|[/?]\\S+)$', # path\n re.IGNORECASE)\n", "path": "coalib/misc/Constants.py"}, {"content": "#!/usr/bin/env python3\n\nimport datetime\nimport locale\nimport platform\nimport sys\nfrom os import getenv\nfrom subprocess import call\n\nimport setuptools.command.build_py\nfrom setuptools import find_packages, setup\nfrom setuptools.command.test import test as TestCommand\n\nfrom coalib import VERSION, assert_supported_version, get_version\nfrom coalib.misc.BuildManPage import BuildManPage\n\ntry:\n lc = locale.getlocale()\n pf = platform.system()\n if pf != 'Windows' and lc == (None, None):\n locale.setlocale(locale.LC_ALL, 'C.UTF-8')\nexcept (ValueError, UnicodeError):\n locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')\n\n\nassert_supported_version()\n\n\nclass BuildPyCommand(setuptools.command.build_py.build_py):\n\n def run(self):\n if platform.system() != 'Windows':\n self.run_command('build_manpage')\n setuptools.command.build_py.build_py.run(self)\n\n\nclass PyTestCommand(TestCommand):\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main([])\n sys.exit(errno)\n\n\nclass BuildDocsCommand(setuptools.command.build_py.build_py):\n apidoc_command = (\n 'sphinx-apidoc', '-f', '-o', 'docs', '--no-toc', 'coalib'\n )\n doc_command = ('make', '-C', 'docs', 'html', 'SPHINXOPTS=-W')\n\n def run(self):\n errOne = call(self.apidoc_command)\n errTwo = call(self.doc_command)\n sys.exit(errOne or errTwo)\n\n\n# Generate API documentation only if we are running on readthedocs.io\non_rtd = getenv('READTHEDOCS', None) is not None\nif on_rtd:\n call(BuildDocsCommand.apidoc_command)\n if 'dev' in VERSION:\n current_version = datetime.datetime.now().strftime('%Y%m%d%H%M%S')\n call(['python3', '.misc/adjust_version_number.py', 'coalib/VERSION',\n '-b {}'.format(current_version)])\n VERSION = get_version()\n\nwith open('requirements.txt') as requirements:\n required = requirements.read().splitlines()\n\nwith open('test-requirements.txt') as requirements:\n test_required = requirements.read().splitlines()\n\nwith open('README.rst') as readme:\n long_description = readme.read()\n\n\nif __name__ == '__main__':\n if platform.system() != 'Windows':\n data_files = [('.', ['coala.1'])]\n else:\n data_files = [('.', [])]\n\n setup(name='coala',\n version=VERSION,\n description='Linting and Fixing Code for All Languages',\n author='The coala developers',\n author_email='[email protected]',\n maintainer='Lasse Schuirmann, Fabian Neuschmidt, Mischa Kr\\xfcger'\n if not on_rtd else 'L.S., F.N., M.K.',\n maintainer_email=('[email protected], '\n '[email protected], '\n '[email protected]'),\n url='http://coala.io/',\n platforms='any',\n packages=find_packages(exclude=['build.*', 'tests', 'tests.*']),\n install_requires=required,\n tests_require=test_required,\n package_data={'coalib': ['system_coafile', 'VERSION',\n 
'bearlib/languages/documentation/*.coalang']\n },\n license='AGPL-3.0',\n data_files=data_files,\n long_description=long_description,\n entry_points={\n 'console_scripts': [\n 'coala = coalib.coala:main',\n 'coala-ci = coalib.coala_ci:main',\n 'coala-json = coalib.coala_json:main',\n 'coala-format = coalib.coala_format:main',\n 'coala-delete-orig = coalib.coala_delete_orig:main']},\n # from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n 'Development Status :: 4 - Beta',\n\n 'Environment :: Console',\n 'Environment :: MacOS X',\n 'Environment :: Win32 (MS Windows)',\n 'Environment :: X11 Applications :: Gnome',\n\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n\n 'License :: OSI Approved :: GNU Affero General Public License '\n 'v3 or later (AGPLv3+)',\n\n 'Operating System :: OS Independent',\n\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3 :: Only',\n\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Text Processing :: Linguistic'],\n cmdclass={'build_manpage': BuildManPage,\n 'build_py': BuildPyCommand,\n 'docs': BuildDocsCommand,\n 'test': PyTestCommand})\n", "path": "setup.py"}]} | 3,025 | 367 |
gh_patches_debug_7645 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-3327 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `colossalai/context/random/__init__.py`
Content:
```
1 from ._helper import (seed, set_mode, with_seed, add_seed, get_seeds, get_states, get_current_mode, set_seed_states,
2 sync_states, moe_set_seed, reset_seeds)
3
4 __all__ = [
5 'seed', 'set_mode', 'with_seed', 'add_seed', 'get_seeds', 'get_states', 'get_current_mode', 'set_seed_states',
6 'sync_states', 'moe_set_seed', 'reset_seeds'
7 ]
8
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/colossalai/context/random/__init__.py b/colossalai/context/random/__init__.py
--- a/colossalai/context/random/__init__.py
+++ b/colossalai/context/random/__init__.py
@@ -1,5 +1,16 @@
-from ._helper import (seed, set_mode, with_seed, add_seed, get_seeds, get_states, get_current_mode, set_seed_states,
- sync_states, moe_set_seed, reset_seeds)
+from ._helper import (
+ add_seed,
+ get_current_mode,
+ get_seeds,
+ get_states,
+ moe_set_seed,
+ reset_seeds,
+ seed,
+ set_mode,
+ set_seed_states,
+ sync_states,
+ with_seed,
+)
__all__ = [
'seed', 'set_mode', 'with_seed', 'add_seed', 'get_seeds', 'get_states', 'get_current_mode', 'set_seed_states',
| {"golden_diff": "diff --git a/colossalai/context/random/__init__.py b/colossalai/context/random/__init__.py\n--- a/colossalai/context/random/__init__.py\n+++ b/colossalai/context/random/__init__.py\n@@ -1,5 +1,16 @@\n-from ._helper import (seed, set_mode, with_seed, add_seed, get_seeds, get_states, get_current_mode, set_seed_states,\n- sync_states, moe_set_seed, reset_seeds)\n+from ._helper import (\n+ add_seed,\n+ get_current_mode,\n+ get_seeds,\n+ get_states,\n+ moe_set_seed,\n+ reset_seeds,\n+ seed,\n+ set_mode,\n+ set_seed_states,\n+ sync_states,\n+ with_seed,\n+)\n \n __all__ = [\n 'seed', 'set_mode', 'with_seed', 'add_seed', 'get_seeds', 'get_states', 'get_current_mode', 'set_seed_states',\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from ._helper import (seed, set_mode, with_seed, add_seed, get_seeds, get_states, get_current_mode, set_seed_states,\n sync_states, moe_set_seed, reset_seeds)\n\n__all__ = [\n 'seed', 'set_mode', 'with_seed', 'add_seed', 'get_seeds', 'get_states', 'get_current_mode', 'set_seed_states',\n 'sync_states', 'moe_set_seed', 'reset_seeds'\n]\n", "path": "colossalai/context/random/__init__.py"}], "after_files": [{"content": "from ._helper import (\n add_seed,\n get_current_mode,\n get_seeds,\n get_states,\n moe_set_seed,\n reset_seeds,\n seed,\n set_mode,\n set_seed_states,\n sync_states,\n with_seed,\n)\n\n__all__ = [\n 'seed', 'set_mode', 'with_seed', 'add_seed', 'get_seeds', 'get_states', 'get_current_mode', 'set_seed_states',\n 'sync_states', 'moe_set_seed', 'reset_seeds'\n]\n", "path": "colossalai/context/random/__init__.py"}]} | 397 | 217 |
gh_patches_debug_1703 | rasdani/github-patches | git_diff | unionai-oss__pandera-1591 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error Importing Pandera with Polars extra
**Describe the bug**
I get an error when importing pandera after installing the latest 0.19.0b2 version with the polars extra in a clean environment. I can import it successfully if I install without the polars extra.
- [x] I have checked that this issue has not already been reported.
- [x] I have confirmed this bug exists on the latest version of pandera.
- [ ] (optional) I have confirmed this bug exists on the main branch of pandera.
#### Code Sample, a copy-pastable example
I installed pandera 0.19.0b2 in a clean virtual environment using `pip install pandera[polars]==0.19.0b2` and attempted to import pandera:
```python
import pandera as pa
```
I got the following error message:
```
>>> import pandera as pa
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File ".venv/lib/python3.11/site-packages/pandera/__init__.py", line 6, in <module>
from pandera import errors, external_config, typing
File ".venv/lib/python3.11/site-packages/pandera/external_config.py", line 23, in <module>
import pyspark.pandas
ModuleNotFoundError: No module named 'pyspark'
```
#### Versions:
- Pandera: 0.19.0b2
- Python: 3.11.7
- Ubuntu: 22.04
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pandera/external_config.py`
Content:
```
1 """Configuration for external packages."""
2
3 import os
4
5 is_spark_local_ip_dirty = False
6 is_pyarrow_ignore_timezone_dirty = False
7
8 try:
9 # try importing pyspark to see if it exists. This is important because the
10 # pandera.typing module defines a Series type that inherits from
11 # pandas.Series, and pyspark v1+ injects a __getitem__ method to pandas
12 # Series and DataFrames to support type hinting:
13 # https://spark.apache.org/docs/3.2.0/api/python/user_guide/pandas_on_spark/typehints.html#type-hinting-with-names
14 # pylint: disable=unused-import
15 if os.getenv("SPARK_LOCAL_IP") is None:
16 is_spark_local_ip_dirty = True
17 os.environ["SPARK_LOCAL_IP"] = "127.0.0.1"
18 if os.getenv("PYARROW_IGNORE_TIMEZONE") is None:
19 is_pyarrow_ignore_timezone_dirty = True
20 # This can be overriden by the user
21 os.environ["PYARROW_IGNORE_TIMEZONE"] = "1"
22
23 import pyspark.pandas
24 finally:
25 if is_spark_local_ip_dirty:
26 os.environ.pop("SPARK_LOCAL_IP")
27 if is_pyarrow_ignore_timezone_dirty:
28 os.environ.pop("PYARROW_IGNORE_TIMEZONE")
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pandera/external_config.py b/pandera/external_config.py
--- a/pandera/external_config.py
+++ b/pandera/external_config.py
@@ -21,6 +21,8 @@
os.environ["PYARROW_IGNORE_TIMEZONE"] = "1"
import pyspark.pandas
+except (ImportError, ModuleNotFoundError):
+ pass
finally:
if is_spark_local_ip_dirty:
os.environ.pop("SPARK_LOCAL_IP")
| {"golden_diff": "diff --git a/pandera/external_config.py b/pandera/external_config.py\n--- a/pandera/external_config.py\n+++ b/pandera/external_config.py\n@@ -21,6 +21,8 @@\n os.environ[\"PYARROW_IGNORE_TIMEZONE\"] = \"1\"\n \n import pyspark.pandas\n+except (ImportError, ModuleNotFoundError):\n+ pass\n finally:\n if is_spark_local_ip_dirty:\n os.environ.pop(\"SPARK_LOCAL_IP\")\n", "issue": "Error Importing Pandera with Polars extra\n**Describe the bug**\r\nI get an error when importing pandera after installing the latest 0.19.0b2 version with the polars extra in a clean environment. I can import it successfully if I install without the polars extra.\r\n\r\n- [x] I have checked that this issue has not already been reported.\r\n- [x] I have confirmed this bug exists on the latest version of pandera.\r\n- [ ] (optional) I have confirmed this bug exists on the main branch of pandera.\r\n\r\n#### Code Sample, a copy-pastable example\r\n\r\nI installed pandera 0.19.0b2 in a clean virtual environment using `pip install pandera[polars]==0.19.0b2` and attempted to import pandera:\r\n\r\n```python\r\nimport pandera as pa\r\n```\r\n\r\nI got the following error message:\r\n```\r\n>>> import pandera as pa\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \".venv/lib/python3.11/site-packages/pandera/__init__.py\", line 6, in <module>\r\n from pandera import errors, external_config, typing\r\n File \".venv/lib/python3.11/site-packages/pandera/external_config.py\", line 23, in <module>\r\n import pyspark.pandas\r\nModuleNotFoundError: No module named 'pyspark'\r\n```\r\n\r\n#### Versions:\r\n\r\n - Pandera: 0.19.0b2\r\n - Python: 3.11.7\r\n - Ubuntu: 22.04\r\n\n", "before_files": [{"content": "\"\"\"Configuration for external packages.\"\"\"\n\nimport os\n\nis_spark_local_ip_dirty = False\nis_pyarrow_ignore_timezone_dirty = False\n\ntry:\n # try importing pyspark to see if it exists. This is important because the\n # pandera.typing module defines a Series type that inherits from\n # pandas.Series, and pyspark v1+ injects a __getitem__ method to pandas\n # Series and DataFrames to support type hinting:\n # https://spark.apache.org/docs/3.2.0/api/python/user_guide/pandas_on_spark/typehints.html#type-hinting-with-names\n # pylint: disable=unused-import\n if os.getenv(\"SPARK_LOCAL_IP\") is None:\n is_spark_local_ip_dirty = True\n os.environ[\"SPARK_LOCAL_IP\"] = \"127.0.0.1\"\n if os.getenv(\"PYARROW_IGNORE_TIMEZONE\") is None:\n is_pyarrow_ignore_timezone_dirty = True\n # This can be overriden by the user\n os.environ[\"PYARROW_IGNORE_TIMEZONE\"] = \"1\"\n\n import pyspark.pandas\nfinally:\n if is_spark_local_ip_dirty:\n os.environ.pop(\"SPARK_LOCAL_IP\")\n if is_pyarrow_ignore_timezone_dirty:\n os.environ.pop(\"PYARROW_IGNORE_TIMEZONE\")\n", "path": "pandera/external_config.py"}], "after_files": [{"content": "\"\"\"Configuration for external packages.\"\"\"\n\nimport os\n\nis_spark_local_ip_dirty = False\nis_pyarrow_ignore_timezone_dirty = False\n\ntry:\n # try importing pyspark to see if it exists. 
This is important because the\n # pandera.typing module defines a Series type that inherits from\n # pandas.Series, and pyspark v1+ injects a __getitem__ method to pandas\n # Series and DataFrames to support type hinting:\n # https://spark.apache.org/docs/3.2.0/api/python/user_guide/pandas_on_spark/typehints.html#type-hinting-with-names\n # pylint: disable=unused-import\n if os.getenv(\"SPARK_LOCAL_IP\") is None:\n is_spark_local_ip_dirty = True\n os.environ[\"SPARK_LOCAL_IP\"] = \"127.0.0.1\"\n if os.getenv(\"PYARROW_IGNORE_TIMEZONE\") is None:\n is_pyarrow_ignore_timezone_dirty = True\n # This can be overriden by the user\n os.environ[\"PYARROW_IGNORE_TIMEZONE\"] = \"1\"\n\n import pyspark.pandas\nexcept (ImportError, ModuleNotFoundError):\n pass\nfinally:\n if is_spark_local_ip_dirty:\n os.environ.pop(\"SPARK_LOCAL_IP\")\n if is_pyarrow_ignore_timezone_dirty:\n os.environ.pop(\"PYARROW_IGNORE_TIMEZONE\")\n", "path": "pandera/external_config.py"}]} | 945 | 109 |
gh_patches_debug_33756 | rasdani/github-patches | git_diff | pwndbg__pwndbg-1126 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
errno command is throwing errors
```
pwndbg> errno
Traceback (most recent call last):
File "/home/gsgx/code/pwndbg/pwndbg/commands/__init__.py", line 145, in __call__
return self.function(*args, **kwargs)
File "/home/gsgx/code/pwndbg/pwndbg/commands/__init__.py", line 239, in _OnlyWhenRunning
return function(*a, **kw)
File "/home/gsgx/code/pwndbg/pwndbg/commands/misc.py", line 32, in errno
errno_location = pwndbg.symbol.get("__errno_location")
AttributeError: '_ArgparsedCommand' object has no attribute 'symbol'
```
I don't know why the issue is occurring, but it can be fixed with this:
```diff
- errno_location = pwndbg.symbol.get("__errno_location")
- err = pwndbg.memory.int(errno_location)
+ errno_location = _pwndbg.symbol.address("__errno_location")
+ print(errno_location)
+ err = _pwndbg.memory.s32(errno_location)
```
But even after fixing that, there's an issue with the following lines:
```python
err = _pwndbg.memory.s32(errno_location)
#err = int(gdb.parse_and_eval('*((int *(*) (void)) __errno_location) ()'))
```
The commented out line is what actually works for me, while the existing code doesn't. I wonder if this was originally due to a change in glibc? If so, we should figure out what version and make this work on both versions.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/lib/tips.py`
Content:
```
1 from random import choice
2
3 TIPS = [
4 # GDB hints
5 "GDB's `apropos <topic>` command displays all registered commands that are related to the given <topic>",
6 "GDB's `follow-fork-mode` parameter can be used to set whether to trace parent or child after fork() calls",
7 'Use GDB\'s `dprintf` command to print all calls to given function. E.g. `dprintf malloc, "malloc(%p)\\n", (void*)$rdi` will print all malloc calls',
8 "Use GDB's `pi` command to run an interactive Python console where you can use Pwndbg APIs like `pwndbg.gdb.memory.read(addr, len)`, `pwndbg.gdb.memory.write(addr, data)`, `pwndbg.gdb.vmmap.get()` and so on!",
9 "GDB's `set directories <path>` parameter can be used to debug e.g. glibc sources like the malloc/free functions!",
10 # Pwndbg hints
11 "GDB and Pwndbg parameters can be shown or set with `show <param>` and `set <param> <value>` GDB commands",
12 "Use Pwndbg's `config` and `theme` commands to tune its configuration and theme colors!",
13 "Pwndbg mirrors some of Windbg commands like `eq`, `ew`, `ed`, `eb`, `es`, `dq`, `dw`, `dd`, `db`, `ds` for writing and reading memory",
14 "Pwndbg resolves kernel memory maps by parsing page tables (default) or via `monitor info mem` QEMU gdbstub command (use `set kernel-vmmap-via-page-tables off` for that)",
15 "Use the `vmmap` instruction for a better & colored memory maps display (than the GDB's `info proc mappings`)",
16 "Use the `telescope` command to dereference a given address/pointer multiple times (if the dereferenced value is a valid ptr; see `config telescope` to configure its behavior)",
17 "Use the `context` (or `ctx`) command to display the context once again. You can reconfigure the context layout with `set context-section <sections>` or forward the output to a file/tty via `set context-output <file>`. See also `config context` to configure it further!",
18 "Disable Pwndbg context information display with `set context-sections ''`",
19 "Pwndbg context displays where the program branches to thanks to emulating few instructions into the future. You can disable this with `set emulate off` which may also speed up debugging",
20 "Use the `canary` command to see all stack canary/cookie values on the stack (based on the *usual* stack canary value initialized by glibc)",
21 "Use the `procinfo` command for better process introspection (than the GDB's `info proc` command)",
22 "Want to display each context panel in a separate tmux window? See https://github.com/pwndbg/pwndbg/blob/dev/FEATURES.md#splitting--layouting-context",
23 "The $heap_base GDB variable can be used to refer to the starting address of the heap after running the `heap` command",
24 ]
25
26
27 def get_tip_of_the_day() -> str:
28 return choice(TIPS)
29
```
Path: `pwndbg/commands/misc.py`
Content:
```
1 import argparse
2 import errno
3
4 import gdb
5
6 import pwndbg.auxv
7 import pwndbg.commands
8 import pwndbg.gdblib.arch as _arch
9 import pwndbg.regs
10 import pwndbg.symbol
11
12 errno.errorcode[0] = "OK"
13
14 parser = argparse.ArgumentParser(
15 description="""
16 Converts errno (or argument) to its string representation.
17 """
18 )
19 parser.add_argument(
20 "err",
21 type=int,
22 nargs="?",
23 default=None,
24 help="Errno; if not passed, it is retrieved from __errno_location",
25 )
26
27
28 @pwndbg.commands.ArgparsedCommand(parser, command_name="errno")
29 @pwndbg.commands.OnlyWhenRunning
30 def errno_(err):
31 if err is None:
32 # Try to get the `errno` variable value
33 # if it does not exist, get the errno variable from its location
34 try:
35 err = int(gdb.parse_and_eval("errno"))
36 except gdb.error:
37 try:
38 err = int(gdb.parse_and_eval("*((int *(*) (void)) __errno_location) ()"))
39 except gdb.error:
40 print(
41 "Could not determine error code automatically: neither `errno` nor `__errno_location` symbols were provided (was libc.so loaded already?)"
42 )
43 return
44
45 msg = errno.errorcode.get(int(err), "Unknown error code")
46 print("Errno %s: %s" % (err, msg))
47
48
49 parser = argparse.ArgumentParser(
50 description="""
51 Prints out a list of all pwndbg commands. The list can be optionally filtered if filter_pattern is passed.
52 """
53 )
54 parser.add_argument(
55 "filter_pattern",
56 type=str,
57 nargs="?",
58 default=None,
59 help="Filter to apply to commands names/docs",
60 )
61
62
63 @pwndbg.commands.ArgparsedCommand(parser, command_name="pwndbg")
64 def pwndbg_(filter_pattern):
65 for name, docs in list_and_filter_commands(filter_pattern):
66 print("%-20s %s" % (name, docs))
67
68
69 parser = argparse.ArgumentParser(description="""Print the distance between the two arguments.""")
70 parser.add_argument("a", type=int, help="The first address.")
71 parser.add_argument("b", type=int, help="The second address.")
72
73
74 @pwndbg.commands.ArgparsedCommand(parser)
75 def distance(a, b):
76 """Print the distance between the two arguments"""
77 a = int(a) & pwndbg.arch.ptrmask
78 b = int(b) & pwndbg.arch.ptrmask
79
80 distance = b - a
81
82 print("%#x->%#x is %#x bytes (%#x words)" % (a, b, distance, distance // pwndbg.arch.ptrsize))
83
84
85 def list_and_filter_commands(filter_str):
86 sorted_commands = list(pwndbg.commands.commands)
87 sorted_commands.sort(key=lambda x: x.__name__)
88
89 if filter_str:
90 filter_str = filter_str.lower()
91
92 results = []
93
94 for c in sorted_commands:
95 name = c.__name__
96 docs = c.__doc__
97
98 if docs:
99 docs = docs.strip()
100 if docs:
101 docs = docs.splitlines()[0]
102
103 if not filter_str or filter_str in name.lower() or (docs and filter_str in docs.lower()):
104 results.append((name, docs))
105
106 return results
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwndbg/commands/misc.py b/pwndbg/commands/misc.py
--- a/pwndbg/commands/misc.py
+++ b/pwndbg/commands/misc.py
@@ -35,10 +35,24 @@
err = int(gdb.parse_and_eval("errno"))
except gdb.error:
try:
- err = int(gdb.parse_and_eval("*((int *(*) (void)) __errno_location) ()"))
+ # We can't simply call __errno_location because its .plt.got entry may be uninitialized
+ # (e.g. if the binary was just started with `starti` command)
+ # So we have to check the got.plt entry first before calling it
+ errno_loc_gotplt = pwndbg.symbol.address("[email protected]")
+
+ # If the got.plt entry is not there (is None), it means the symbol is not used by the binary
+ if errno_loc_gotplt is None or pwndbg.vmmap.find(
+ pwndbg.memory.pvoid(errno_loc_gotplt)
+ ):
+ err = int(gdb.parse_and_eval("*((int *(*) (void)) __errno_location) ()"))
+ else:
+ print(
+ "Could not determine error code automatically: the [email protected] has no valid address yet (perhaps libc.so hasn't been loaded yet?)"
+ )
+ return
except gdb.error:
print(
- "Could not determine error code automatically: neither `errno` nor `__errno_location` symbols were provided (was libc.so loaded already?)"
+ "Could not determine error code automatically: neither `errno` nor `__errno_location` symbols were provided (perhaps libc.so hasn't been not loaded yet?)"
)
return
diff --git a/pwndbg/lib/tips.py b/pwndbg/lib/tips.py
--- a/pwndbg/lib/tips.py
+++ b/pwndbg/lib/tips.py
@@ -21,6 +21,7 @@
"Use the `procinfo` command for better process introspection (than the GDB's `info proc` command)",
"Want to display each context panel in a separate tmux window? See https://github.com/pwndbg/pwndbg/blob/dev/FEATURES.md#splitting--layouting-context",
"The $heap_base GDB variable can be used to refer to the starting address of the heap after running the `heap` command",
+ "Use the `errno` (or `errno <number>`) command to see the name of the last or provided (libc) error",
]
| {"golden_diff": "diff --git a/pwndbg/commands/misc.py b/pwndbg/commands/misc.py\n--- a/pwndbg/commands/misc.py\n+++ b/pwndbg/commands/misc.py\n@@ -35,10 +35,24 @@\n err = int(gdb.parse_and_eval(\"errno\"))\n except gdb.error:\n try:\n- err = int(gdb.parse_and_eval(\"*((int *(*) (void)) __errno_location) ()\"))\n+ # We can't simply call __errno_location because its .plt.got entry may be uninitialized\n+ # (e.g. if the binary was just started with `starti` command)\n+ # So we have to check the got.plt entry first before calling it\n+ errno_loc_gotplt = pwndbg.symbol.address(\"[email protected]\")\n+\n+ # If the got.plt entry is not there (is None), it means the symbol is not used by the binary\n+ if errno_loc_gotplt is None or pwndbg.vmmap.find(\n+ pwndbg.memory.pvoid(errno_loc_gotplt)\n+ ):\n+ err = int(gdb.parse_and_eval(\"*((int *(*) (void)) __errno_location) ()\"))\n+ else:\n+ print(\n+ \"Could not determine error code automatically: the [email protected] has no valid address yet (perhaps libc.so hasn't been loaded yet?)\"\n+ )\n+ return\n except gdb.error:\n print(\n- \"Could not determine error code automatically: neither `errno` nor `__errno_location` symbols were provided (was libc.so loaded already?)\"\n+ \"Could not determine error code automatically: neither `errno` nor `__errno_location` symbols were provided (perhaps libc.so hasn't been not loaded yet?)\"\n )\n return\n \ndiff --git a/pwndbg/lib/tips.py b/pwndbg/lib/tips.py\n--- a/pwndbg/lib/tips.py\n+++ b/pwndbg/lib/tips.py\n@@ -21,6 +21,7 @@\n \"Use the `procinfo` command for better process introspection (than the GDB's `info proc` command)\",\n \"Want to display each context panel in a separate tmux window? See https://github.com/pwndbg/pwndbg/blob/dev/FEATURES.md#splitting--layouting-context\",\n \"The $heap_base GDB variable can be used to refer to the starting address of the heap after running the `heap` command\",\n+ \"Use the `errno` (or `errno <number>`) command to see the name of the last or provided (libc) error\",\n ]\n", "issue": "errno command is throwing errors\n```\r\npwndbg> errno\r\nTraceback (most recent call last):\r\n File \"/home/gsgx/code/pwndbg/pwndbg/commands/__init__.py\", line 145, in __call__\r\n return self.function(*args, **kwargs)\r\n File \"/home/gsgx/code/pwndbg/pwndbg/commands/__init__.py\", line 239, in _OnlyWhenRunning\r\n return function(*a, **kw)\r\n File \"/home/gsgx/code/pwndbg/pwndbg/commands/misc.py\", line 32, in errno\r\n errno_location = pwndbg.symbol.get(\"__errno_location\")\r\nAttributeError: '_ArgparsedCommand' object has no attribute 'symbol'\r\n```\r\n\r\nI don't know why the issue is occurring, but it can be fixed with this:\r\n```diff\r\n- errno_location = pwndbg.symbol.get(\"__errno_location\")\r\n- err = pwndbg.memory.int(errno_location)\r\n+ errno_location = _pwndbg.symbol.address(\"__errno_location\")\r\n+ print(errno_location)\r\n+ err = _pwndbg.memory.s32(errno_location)\r\n```\r\n\r\nBut even after fixing that, there's an issue with the following lines:\r\n```python\r\nerr = _pwndbg.memory.s32(errno_location)\r\n#err = int(gdb.parse_and_eval('*((int *(*) (void)) __errno_location) ()'))\r\n```\r\n\r\nThe commented out line is what actually works for me, while the existing code doesn't. I wonder if this was originally due to a change in glibc? 
If so, we should figure out what version and make this work on both versions.\r\n\r\n\n", "before_files": [{"content": "from random import choice\n\nTIPS = [\n # GDB hints\n \"GDB's `apropos <topic>` command displays all registered commands that are related to the given <topic>\",\n \"GDB's `follow-fork-mode` parameter can be used to set whether to trace parent or child after fork() calls\",\n 'Use GDB\\'s `dprintf` command to print all calls to given function. E.g. `dprintf malloc, \"malloc(%p)\\\\n\", (void*)$rdi` will print all malloc calls',\n \"Use GDB's `pi` command to run an interactive Python console where you can use Pwndbg APIs like `pwndbg.gdb.memory.read(addr, len)`, `pwndbg.gdb.memory.write(addr, data)`, `pwndbg.gdb.vmmap.get()` and so on!\",\n \"GDB's `set directories <path>` parameter can be used to debug e.g. glibc sources like the malloc/free functions!\",\n # Pwndbg hints\n \"GDB and Pwndbg parameters can be shown or set with `show <param>` and `set <param> <value>` GDB commands\",\n \"Use Pwndbg's `config` and `theme` commands to tune its configuration and theme colors!\",\n \"Pwndbg mirrors some of Windbg commands like `eq`, `ew`, `ed`, `eb`, `es`, `dq`, `dw`, `dd`, `db`, `ds` for writing and reading memory\",\n \"Pwndbg resolves kernel memory maps by parsing page tables (default) or via `monitor info mem` QEMU gdbstub command (use `set kernel-vmmap-via-page-tables off` for that)\",\n \"Use the `vmmap` instruction for a better & colored memory maps display (than the GDB's `info proc mappings`)\",\n \"Use the `telescope` command to dereference a given address/pointer multiple times (if the dereferenced value is a valid ptr; see `config telescope` to configure its behavior)\",\n \"Use the `context` (or `ctx`) command to display the context once again. You can reconfigure the context layout with `set context-section <sections>` or forward the output to a file/tty via `set context-output <file>`. See also `config context` to configure it further!\",\n \"Disable Pwndbg context information display with `set context-sections ''`\",\n \"Pwndbg context displays where the program branches to thanks to emulating few instructions into the future. You can disable this with `set emulate off` which may also speed up debugging\",\n \"Use the `canary` command to see all stack canary/cookie values on the stack (based on the *usual* stack canary value initialized by glibc)\",\n \"Use the `procinfo` command for better process introspection (than the GDB's `info proc` command)\",\n \"Want to display each context panel in a separate tmux window? 
See https://github.com/pwndbg/pwndbg/blob/dev/FEATURES.md#splitting--layouting-context\",\n \"The $heap_base GDB variable can be used to refer to the starting address of the heap after running the `heap` command\",\n]\n\n\ndef get_tip_of_the_day() -> str:\n return choice(TIPS)\n", "path": "pwndbg/lib/tips.py"}, {"content": "import argparse\nimport errno\n\nimport gdb\n\nimport pwndbg.auxv\nimport pwndbg.commands\nimport pwndbg.gdblib.arch as _arch\nimport pwndbg.regs\nimport pwndbg.symbol\n\nerrno.errorcode[0] = \"OK\"\n\nparser = argparse.ArgumentParser(\n description=\"\"\"\nConverts errno (or argument) to its string representation.\n\"\"\"\n)\nparser.add_argument(\n \"err\",\n type=int,\n nargs=\"?\",\n default=None,\n help=\"Errno; if not passed, it is retrieved from __errno_location\",\n)\n\n\[email protected](parser, command_name=\"errno\")\[email protected]\ndef errno_(err):\n if err is None:\n # Try to get the `errno` variable value\n # if it does not exist, get the errno variable from its location\n try:\n err = int(gdb.parse_and_eval(\"errno\"))\n except gdb.error:\n try:\n err = int(gdb.parse_and_eval(\"*((int *(*) (void)) __errno_location) ()\"))\n except gdb.error:\n print(\n \"Could not determine error code automatically: neither `errno` nor `__errno_location` symbols were provided (was libc.so loaded already?)\"\n )\n return\n\n msg = errno.errorcode.get(int(err), \"Unknown error code\")\n print(\"Errno %s: %s\" % (err, msg))\n\n\nparser = argparse.ArgumentParser(\n description=\"\"\"\nPrints out a list of all pwndbg commands. The list can be optionally filtered if filter_pattern is passed.\n\"\"\"\n)\nparser.add_argument(\n \"filter_pattern\",\n type=str,\n nargs=\"?\",\n default=None,\n help=\"Filter to apply to commands names/docs\",\n)\n\n\[email protected](parser, command_name=\"pwndbg\")\ndef pwndbg_(filter_pattern):\n for name, docs in list_and_filter_commands(filter_pattern):\n print(\"%-20s %s\" % (name, docs))\n\n\nparser = argparse.ArgumentParser(description=\"\"\"Print the distance between the two arguments.\"\"\")\nparser.add_argument(\"a\", type=int, help=\"The first address.\")\nparser.add_argument(\"b\", type=int, help=\"The second address.\")\n\n\[email protected](parser)\ndef distance(a, b):\n \"\"\"Print the distance between the two arguments\"\"\"\n a = int(a) & pwndbg.arch.ptrmask\n b = int(b) & pwndbg.arch.ptrmask\n\n distance = b - a\n\n print(\"%#x->%#x is %#x bytes (%#x words)\" % (a, b, distance, distance // pwndbg.arch.ptrsize))\n\n\ndef list_and_filter_commands(filter_str):\n sorted_commands = list(pwndbg.commands.commands)\n sorted_commands.sort(key=lambda x: x.__name__)\n\n if filter_str:\n filter_str = filter_str.lower()\n\n results = []\n\n for c in sorted_commands:\n name = c.__name__\n docs = c.__doc__\n\n if docs:\n docs = docs.strip()\n if docs:\n docs = docs.splitlines()[0]\n\n if not filter_str or filter_str in name.lower() or (docs and filter_str in docs.lower()):\n results.append((name, docs))\n\n return results\n", "path": "pwndbg/commands/misc.py"}], "after_files": [{"content": "from random import choice\n\nTIPS = [\n # GDB hints\n \"GDB's `apropos <topic>` command displays all registered commands that are related to the given <topic>\",\n \"GDB's `follow-fork-mode` parameter can be used to set whether to trace parent or child after fork() calls\",\n 'Use GDB\\'s `dprintf` command to print all calls to given function. E.g. 
`dprintf malloc, \"malloc(%p)\\\\n\", (void*)$rdi` will print all malloc calls',\n \"Use GDB's `pi` command to run an interactive Python console where you can use Pwndbg APIs like `pwndbg.gdb.memory.read(addr, len)`, `pwndbg.gdb.memory.write(addr, data)`, `pwndbg.gdb.vmmap.get()` and so on!\",\n \"GDB's `set directories <path>` parameter can be used to debug e.g. glibc sources like the malloc/free functions!\",\n # Pwndbg hints\n \"GDB and Pwndbg parameters can be shown or set with `show <param>` and `set <param> <value>` GDB commands\",\n \"Use Pwndbg's `config` and `theme` commands to tune its configuration and theme colors!\",\n \"Pwndbg mirrors some of Windbg commands like `eq`, `ew`, `ed`, `eb`, `es`, `dq`, `dw`, `dd`, `db`, `ds` for writing and reading memory\",\n \"Pwndbg resolves kernel memory maps by parsing page tables (default) or via `monitor info mem` QEMU gdbstub command (use `set kernel-vmmap-via-page-tables off` for that)\",\n \"Use the `vmmap` instruction for a better & colored memory maps display (than the GDB's `info proc mappings`)\",\n \"Use the `telescope` command to dereference a given address/pointer multiple times (if the dereferenced value is a valid ptr; see `config telescope` to configure its behavior)\",\n \"Use the `context` (or `ctx`) command to display the context once again. You can reconfigure the context layout with `set context-section <sections>` or forward the output to a file/tty via `set context-output <file>`. See also `config context` to configure it further!\",\n \"Disable Pwndbg context information display with `set context-sections ''`\",\n \"Pwndbg context displays where the program branches to thanks to emulating few instructions into the future. You can disable this with `set emulate off` which may also speed up debugging\",\n \"Use the `canary` command to see all stack canary/cookie values on the stack (based on the *usual* stack canary value initialized by glibc)\",\n \"Use the `procinfo` command for better process introspection (than the GDB's `info proc` command)\",\n \"Want to display each context panel in a separate tmux window? See https://github.com/pwndbg/pwndbg/blob/dev/FEATURES.md#splitting--layouting-context\",\n \"The $heap_base GDB variable can be used to refer to the starting address of the heap after running the `heap` command\",\n \"Use the `errno` (or `errno <number>`) command to see the name of the last or provided (libc) error\",\n]\n\n\ndef get_tip_of_the_day() -> str:\n return choice(TIPS)\n", "path": "pwndbg/lib/tips.py"}, {"content": "import argparse\nimport errno\n\nimport gdb\n\nimport pwndbg.auxv\nimport pwndbg.commands\nimport pwndbg.gdblib.arch as _arch\nimport pwndbg.regs\nimport pwndbg.symbol\n\nerrno.errorcode[0] = \"OK\"\n\nparser = argparse.ArgumentParser(\n description=\"\"\"\nConverts errno (or argument) to its string representation.\n\"\"\"\n)\nparser.add_argument(\n \"err\",\n type=int,\n nargs=\"?\",\n default=None,\n help=\"Errno; if not passed, it is retrieved from __errno_location\",\n)\n\n\[email protected](parser, command_name=\"errno\")\[email protected]\ndef errno_(err):\n if err is None:\n # Try to get the `errno` variable value\n # if it does not exist, get the errno variable from its location\n try:\n err = int(gdb.parse_and_eval(\"errno\"))\n except gdb.error:\n try:\n # We can't simply call __errno_location because its .plt.got entry may be uninitialized\n # (e.g. 
if the binary was just started with `starti` command)\n # So we have to check the got.plt entry first before calling it\n errno_loc_gotplt = pwndbg.symbol.address(\"[email protected]\")\n\n # If the got.plt entry is not there (is None), it means the symbol is not used by the binary\n if errno_loc_gotplt is None or pwndbg.vmmap.find(\n pwndbg.memory.pvoid(errno_loc_gotplt)\n ):\n err = int(gdb.parse_and_eval(\"*((int *(*) (void)) __errno_location) ()\"))\n else:\n print(\n \"Could not determine error code automatically: the [email protected] has no valid address yet (perhaps libc.so hasn't been loaded yet?)\"\n )\n return\n except gdb.error:\n print(\n \"Could not determine error code automatically: neither `errno` nor `__errno_location` symbols were provided (perhaps libc.so hasn't been not loaded yet?)\"\n )\n return\n\n msg = errno.errorcode.get(int(err), \"Unknown error code\")\n print(\"Errno %s: %s\" % (err, msg))\n\n\nparser = argparse.ArgumentParser(\n description=\"\"\"\nPrints out a list of all pwndbg commands. The list can be optionally filtered if filter_pattern is passed.\n\"\"\"\n)\nparser.add_argument(\n \"filter_pattern\",\n type=str,\n nargs=\"?\",\n default=None,\n help=\"Filter to apply to commands names/docs\",\n)\n\n\[email protected](parser, command_name=\"pwndbg\")\ndef pwndbg_(filter_pattern):\n for name, docs in list_and_filter_commands(filter_pattern):\n print(\"%-20s %s\" % (name, docs))\n\n\nparser = argparse.ArgumentParser(description=\"\"\"Print the distance between the two arguments.\"\"\")\nparser.add_argument(\"a\", type=int, help=\"The first address.\")\nparser.add_argument(\"b\", type=int, help=\"The second address.\")\n\n\[email protected](parser)\ndef distance(a, b):\n \"\"\"Print the distance between the two arguments\"\"\"\n a = int(a) & pwndbg.arch.ptrmask\n b = int(b) & pwndbg.arch.ptrmask\n\n distance = b - a\n\n print(\"%#x->%#x is %#x bytes (%#x words)\" % (a, b, distance, distance // pwndbg.arch.ptrsize))\n\n\ndef list_and_filter_commands(filter_str):\n sorted_commands = list(pwndbg.commands.commands)\n sorted_commands.sort(key=lambda x: x.__name__)\n\n if filter_str:\n filter_str = filter_str.lower()\n\n results = []\n\n for c in sorted_commands:\n name = c.__name__\n docs = c.__doc__\n\n if docs:\n docs = docs.strip()\n if docs:\n docs = docs.splitlines()[0]\n\n if not filter_str or filter_str in name.lower() or (docs and filter_str in docs.lower()):\n results.append((name, docs))\n\n return results\n", "path": "pwndbg/commands/misc.py"}]} | 2,323 | 573 |
gh_patches_debug_22260 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-1392 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
E0002 bug when using parameters for DynamoDB AttributeDefinitions
*cfn-lint version: 0.28.2*
*Description of issue.*
Rule E3039 (added in 0.28.0) doesn't support Refs and results in a E0002 error for the template.
Repeatable with this template snippet:
```
AWSTemplateFormatVersion: '2010-09-09'
Parameters:
HashKeyName:
Description: Primary Key Name
Type: String
AllowedPattern: '[a-zA-Z0-9]*'
MinLength: '1'
MaxLength: '2048'
ConstraintDescription: must contain only alphanumberic characters
HashKeyType:
Description: Primary Key Type
Type: String
Default: S
AllowedPattern: '[S|N]'
MinLength: '1'
MaxLength: '1'
ConstraintDescription: must be either S or N
RangeKeyName:
Description: Sort Key Name
Type: String
Default: 'NA'
AllowedPattern: '[a-zA-Z0-9]*'
MinLength: '0'
MaxLength: '2048'
ConstraintDescription: must contain only alphanumberic characters
RangeKeyType:
Description: Sort Key Type
Type: String
Default: S
AllowedPattern: '[S|N]'
MinLength: '0'
MaxLength: '1'
ConstraintDescription: must be either S or Ns
Conditions:
isRangeKeyAvailable: !Not [ !Equals [ !Ref RangeKeyName, 'NA' ] ]
Resources:
DynamoDBTable:
DeletionPolicy: Delete
UpdateReplacePolicy: Delete
Type: AWS::DynamoDB::Table
Properties:
AttributeDefinitions: !If
- isRangeKeyAvailable
- - AttributeName: !Ref HashKeyName
AttributeType: !Ref HashKeyType
- AttributeName: !Ref RangeKeyName
AttributeType: !Ref RangeKeyType
- - AttributeName: !Ref HashKeyName
AttributeType: !Ref HashKeyType
KeySchema: !If
- isRangeKeyAvailable
- - AttributeName: !Ref HashKeyName
KeyType: HASH
- AttributeName: !Ref RangeKeyName
KeyType: RANGE
- - AttributeName: !Ref HashKeyName
KeyType: HASH
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py`
Content:
```
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import six
6 from cfnlint.decode.node import list_node
7 from cfnlint.rules import CloudFormationLintRule
8 from cfnlint.rules import RuleMatch
9
10
11 class AttributeMismatch(CloudFormationLintRule):
12 """Check DynamoDB Attributes"""
13 id = 'E3039'
14 shortdesc = 'AttributeDefinitions / KeySchemas mismatch'
15 description = 'Verify the set of Attributes in AttributeDefinitions and KeySchemas match'
16 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html'
17 tags = ['resources', 'dynamodb']
18
19 def __init__(self):
20 """Init"""
21 super(AttributeMismatch, self).__init__()
22 self.resource_property_types = ['AWS::DynamoDB::Table']
23
24 def _get_key_schema_attributes(self, key_schemas_sets):
25 """ Get Key Schema attributes """
26 keys = set()
27
28 for properties, _ in key_schemas_sets:
29 for key in properties:
30 attribute_name = key.get_safe('AttributeName', type_t=six.string_types)
31 if attribute_name:
32 keys.add(key.get('AttributeName'))
33 return keys
34
35 def _get_attribute_secondary(self, property_sets):
36 """ Get the key schemas from secondary indexes """
37 keys = set()
38
39 for properties, _ in property_sets:
40 for index in properties:
41 keys = keys.union(
42 self._get_key_schema_attributes(
43 index.get_safe('KeySchema', list_node([], None, None), [], list)
44 )
45 )
46
47 return keys
48
49 def check_property_set(self, property_set, path):
50 """ Check a property set """
51 matches = []
52 properties = property_set.get('Object')
53
54 keys = set()
55 attributes = set()
56
57 for attribute in properties.get('AttributeDefinitions', []):
58 attribute_name = attribute.get('AttributeName')
59 if isinstance(attribute_name, six.string_types):
60 attributes.add(attribute.get('AttributeName'))
61 else:
62 self.logger.info('attribute definitions is not using just strings')
63 return matches
64 keys = keys.union(
65 self._get_key_schema_attributes(
66 properties.get_safe('KeySchema', list_node([], None, None), [], list)
67 )
68 )
69 keys = keys.union(self._get_attribute_secondary(
70 properties.get_safe('GlobalSecondaryIndexes', list_node([], None, None), path, list
71 ))) # pylint: disable=bad-continuation
72 keys = keys.union(self._get_attribute_secondary(
73 properties.get_safe('LocalSecondaryIndexes', list_node([], None, None), path, list
74 ))) # pylint: disable=bad-continuation
75
76 if attributes != keys:
77 message = 'The set of Attributes in AttributeDefinitions: {0} and KeySchemas: {1} must match at {2}'
78 matches.append(RuleMatch(
79 path,
80 message.format(sorted(list(attributes)), sorted(list(keys)), '/'.join(map(str, path)))
81 ))
82
83 return matches
84
85 def check(self, properties, path, cfn):
86 """Check itself"""
87 matches = []
88
89 property_sets = cfn.get_object_without_conditions(properties, path)
90 for property_set in property_sets:
91 matches.extend(self.check_property_set(property_set, path))
92 return matches
93
94 def match_resource_properties(self, properties, _, path, cfn):
95 """Match for sub properties"""
96 matches = []
97 matches.extend(self.check(properties, path, cfn))
98 return matches
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py b/src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py
--- a/src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py
+++ b/src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py
@@ -77,7 +77,8 @@
message = 'The set of Attributes in AttributeDefinitions: {0} and KeySchemas: {1} must match at {2}'
matches.append(RuleMatch(
path,
- message.format(sorted(list(attributes)), sorted(list(keys)), '/'.join(map(str, path)))
+ message.format(sorted(list(attributes)), sorted(
+ list(keys)), '/'.join(map(str, path)))
))
return matches
@@ -86,7 +87,8 @@
"""Check itself"""
matches = []
- property_sets = cfn.get_object_without_conditions(properties, path)
+ property_sets = cfn.get_object_without_conditions(
+ properties, ['AttributeDefinitions', 'KeySchema', 'GlobalSecondaryIndexes', 'LocalSecondaryIndexes'])
for property_set in property_sets:
matches.extend(self.check_property_set(property_set, path))
return matches
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py b/src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py\n--- a/src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py\n+++ b/src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py\n@@ -77,7 +77,8 @@\n message = 'The set of Attributes in AttributeDefinitions: {0} and KeySchemas: {1} must match at {2}'\n matches.append(RuleMatch(\n path,\n- message.format(sorted(list(attributes)), sorted(list(keys)), '/'.join(map(str, path)))\n+ message.format(sorted(list(attributes)), sorted(\n+ list(keys)), '/'.join(map(str, path)))\n ))\n \n return matches\n@@ -86,7 +87,8 @@\n \"\"\"Check itself\"\"\"\n matches = []\n \n- property_sets = cfn.get_object_without_conditions(properties, path)\n+ property_sets = cfn.get_object_without_conditions(\n+ properties, ['AttributeDefinitions', 'KeySchema', 'GlobalSecondaryIndexes', 'LocalSecondaryIndexes'])\n for property_set in property_sets:\n matches.extend(self.check_property_set(property_set, path))\n return matches\n", "issue": "E0002 bug when using parameters for DynamoDB AttributeDefinitions\n*cfn-lint version: 0.28.2*\r\n\r\n*Description of issue.*\r\n\r\nRule E3039 (added in 0.28.0) doesn't support Refs and results in a E0002 error for the template. \r\n\r\nRepeatable with this template snippet:\r\n\r\n```\r\nAWSTemplateFormatVersion: '2010-09-09'\r\n\r\nParameters:\r\n HashKeyName:\r\n Description: Primary Key Name\r\n Type: String\r\n AllowedPattern: '[a-zA-Z0-9]*'\r\n MinLength: '1'\r\n MaxLength: '2048'\r\n ConstraintDescription: must contain only alphanumberic characters\r\n\r\n HashKeyType:\r\n Description: Primary Key Type\r\n Type: String\r\n Default: S\r\n AllowedPattern: '[S|N]'\r\n MinLength: '1'\r\n MaxLength: '1'\r\n ConstraintDescription: must be either S or N\r\n\r\n RangeKeyName:\r\n Description: Sort Key Name\r\n Type: String\r\n Default: 'NA'\r\n AllowedPattern: '[a-zA-Z0-9]*'\r\n MinLength: '0'\r\n MaxLength: '2048'\r\n ConstraintDescription: must contain only alphanumberic characters\r\n\r\n RangeKeyType:\r\n Description: Sort Key Type\r\n Type: String\r\n Default: S\r\n AllowedPattern: '[S|N]'\r\n MinLength: '0'\r\n MaxLength: '1'\r\n ConstraintDescription: must be either S or Ns\r\n\r\nConditions:\r\n isRangeKeyAvailable: !Not [ !Equals [ !Ref RangeKeyName, 'NA' ] ]\r\n\r\nResources:\r\n DynamoDBTable:\r\n DeletionPolicy: Delete\r\n UpdateReplacePolicy: Delete\r\n Type: AWS::DynamoDB::Table\r\n Properties:\r\n AttributeDefinitions: !If\r\n - isRangeKeyAvailable\r\n - - AttributeName: !Ref HashKeyName\r\n AttributeType: !Ref HashKeyType\r\n - AttributeName: !Ref RangeKeyName\r\n AttributeType: !Ref RangeKeyType\r\n - - AttributeName: !Ref HashKeyName\r\n AttributeType: !Ref HashKeyType\r\n KeySchema: !If\r\n - isRangeKeyAvailable\r\n - - AttributeName: !Ref HashKeyName\r\n KeyType: HASH\r\n - AttributeName: !Ref RangeKeyName\r\n KeyType: RANGE\r\n - - AttributeName: !Ref HashKeyName\r\n KeyType: HASH\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright 2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport six\nfrom cfnlint.decode.node import list_node\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass AttributeMismatch(CloudFormationLintRule):\n \"\"\"Check DynamoDB Attributes\"\"\"\n id = 'E3039'\n shortdesc = 'AttributeDefinitions / KeySchemas mismatch'\n description = 'Verify the set of Attributes in AttributeDefinitions and KeySchemas match'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html'\n tags = ['resources', 'dynamodb']\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super(AttributeMismatch, self).__init__()\n self.resource_property_types = ['AWS::DynamoDB::Table']\n\n def _get_key_schema_attributes(self, key_schemas_sets):\n \"\"\" Get Key Schema attributes \"\"\"\n keys = set()\n\n for properties, _ in key_schemas_sets:\n for key in properties:\n attribute_name = key.get_safe('AttributeName', type_t=six.string_types)\n if attribute_name:\n keys.add(key.get('AttributeName'))\n return keys\n\n def _get_attribute_secondary(self, property_sets):\n \"\"\" Get the key schemas from secondary indexes \"\"\"\n keys = set()\n\n for properties, _ in property_sets:\n for index in properties:\n keys = keys.union(\n self._get_key_schema_attributes(\n index.get_safe('KeySchema', list_node([], None, None), [], list)\n )\n )\n\n return keys\n\n def check_property_set(self, property_set, path):\n \"\"\" Check a property set \"\"\"\n matches = []\n properties = property_set.get('Object')\n\n keys = set()\n attributes = set()\n\n for attribute in properties.get('AttributeDefinitions', []):\n attribute_name = attribute.get('AttributeName')\n if isinstance(attribute_name, six.string_types):\n attributes.add(attribute.get('AttributeName'))\n else:\n self.logger.info('attribute definitions is not using just strings')\n return matches\n keys = keys.union(\n self._get_key_schema_attributes(\n properties.get_safe('KeySchema', list_node([], None, None), [], list)\n )\n )\n keys = keys.union(self._get_attribute_secondary(\n properties.get_safe('GlobalSecondaryIndexes', list_node([], None, None), path, list\n ))) # pylint: disable=bad-continuation\n keys = keys.union(self._get_attribute_secondary(\n properties.get_safe('LocalSecondaryIndexes', list_node([], None, None), path, list\n ))) # pylint: disable=bad-continuation\n\n if attributes != keys:\n message = 'The set of Attributes in AttributeDefinitions: {0} and KeySchemas: {1} must match at {2}'\n matches.append(RuleMatch(\n path,\n message.format(sorted(list(attributes)), sorted(list(keys)), '/'.join(map(str, path)))\n ))\n\n return matches\n\n def check(self, properties, path, cfn):\n \"\"\"Check itself\"\"\"\n matches = []\n\n property_sets = cfn.get_object_without_conditions(properties, path)\n for property_set in property_sets:\n matches.extend(self.check_property_set(property_set, path))\n return matches\n\n def match_resource_properties(self, properties, _, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = []\n matches.extend(self.check(properties, path, cfn))\n return matches\n", "path": "src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py"}], "after_files": [{"content": "\"\"\"\nCopyright 2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport six\nfrom cfnlint.decode.node import list_node\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass AttributeMismatch(CloudFormationLintRule):\n \"\"\"Check DynamoDB Attributes\"\"\"\n id = 'E3039'\n shortdesc = 'AttributeDefinitions / KeySchemas mismatch'\n description = 'Verify the set of Attributes in AttributeDefinitions and KeySchemas match'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html'\n tags = ['resources', 'dynamodb']\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super(AttributeMismatch, self).__init__()\n self.resource_property_types = ['AWS::DynamoDB::Table']\n\n def _get_key_schema_attributes(self, key_schemas_sets):\n \"\"\" Get Key Schema attributes \"\"\"\n keys = set()\n\n for properties, _ in key_schemas_sets:\n for key in properties:\n attribute_name = key.get_safe('AttributeName', type_t=six.string_types)\n if attribute_name:\n keys.add(key.get('AttributeName'))\n return keys\n\n def _get_attribute_secondary(self, property_sets):\n \"\"\" Get the key schemas from secondary indexes \"\"\"\n keys = set()\n\n for properties, _ in property_sets:\n for index in properties:\n keys = keys.union(\n self._get_key_schema_attributes(\n index.get_safe('KeySchema', list_node([], None, None), [], list)\n )\n )\n\n return keys\n\n def check_property_set(self, property_set, path):\n \"\"\" Check a property set \"\"\"\n matches = []\n properties = property_set.get('Object')\n\n keys = set()\n attributes = set()\n\n for attribute in properties.get('AttributeDefinitions', []):\n attribute_name = attribute.get('AttributeName')\n if isinstance(attribute_name, six.string_types):\n attributes.add(attribute.get('AttributeName'))\n else:\n self.logger.info('attribute definitions is not using just strings')\n return matches\n keys = keys.union(\n self._get_key_schema_attributes(\n properties.get_safe('KeySchema', list_node([], None, None), [], list)\n )\n )\n keys = keys.union(self._get_attribute_secondary(\n properties.get_safe('GlobalSecondaryIndexes', list_node([], None, None), path, list\n ))) # pylint: disable=bad-continuation\n keys = keys.union(self._get_attribute_secondary(\n properties.get_safe('LocalSecondaryIndexes', list_node([], None, None), path, list\n ))) # pylint: disable=bad-continuation\n\n if attributes != keys:\n message = 'The set of Attributes in AttributeDefinitions: {0} and KeySchemas: {1} must match at {2}'\n matches.append(RuleMatch(\n path,\n message.format(sorted(list(attributes)), sorted(\n list(keys)), '/'.join(map(str, path)))\n ))\n\n return matches\n\n def check(self, properties, path, cfn):\n \"\"\"Check itself\"\"\"\n matches = []\n\n property_sets = cfn.get_object_without_conditions(\n properties, ['AttributeDefinitions', 'KeySchema', 'GlobalSecondaryIndexes', 'LocalSecondaryIndexes'])\n for property_set in property_sets:\n matches.extend(self.check_property_set(property_set, path))\n return matches\n\n def match_resource_properties(self, properties, _, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = []\n matches.extend(self.check(properties, path, cfn))\n return matches\n", "path": "src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py"}]} | 1,753 | 256 |
gh_patches_debug_20862 | rasdani/github-patches | git_diff | conda__conda-5311 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
--yes not functioning for "Clean"
Hi, either `conda clean -y --all` or `conda clean --yes --all` still prompt for a confirmation.
conda 4.2.13
macOS 10.12.4
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda/cli/main_clean.py`
Content:
```
1 # (c) 2012-2016 Continuum Analytics, Inc. / http://continuum.io
2 # All Rights Reserved
3 #
4 # conda is distributed under the terms of the BSD 3-clause license.
5 # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
6 from __future__ import absolute_import, division, print_function, unicode_literals
7
8 from collections import defaultdict
9 from logging import getLogger
10 import os
11 from os import listdir, lstat, walk
12 from os.path import getsize, isdir, join
13 import sys
14
15 from .common import add_parser_json, add_parser_yes, confirm_yn, stdout_json
16 from ..base.constants import CONDA_TARBALL_EXTENSION
17 from ..base.context import context
18 from ..exceptions import ArgumentError
19 from ..gateways.disk.delete import rm_rf
20 from ..gateways.disk.link import CrossPlatformStLink
21 from ..utils import human_bytes
22
23 log = getLogger(__name__)
24
25 descr = """
26 Remove unused packages and caches.
27 """
28
29 example = """
30 Examples:
31
32 conda clean --tarballs
33 """
34
35 def configure_parser(sub_parsers):
36 p = sub_parsers.add_parser(
37 'clean',
38 description=descr,
39 help=descr,
40 epilog=example,
41 )
42 add_parser_yes(p)
43 add_parser_json(p)
44 p.add_argument(
45 "-a", "--all",
46 action="store_true",
47 help="Remove index cache, lock files, tarballs, "
48 "unused cache packages, and source cache.",
49 )
50 p.add_argument(
51 "-i", "--index-cache",
52 action="store_true",
53 help="Remove index cache.",
54 )
55 p.add_argument(
56 "-l", "--lock",
57 action="store_true",
58 help="Remove all conda lock files.",
59 )
60 p.add_argument(
61 "-t", "--tarballs",
62 action="store_true",
63 help="Remove cached package tarballs.",
64 )
65 p.add_argument(
66 '-p', '--packages',
67 action='store_true',
68 help="""Remove unused cached packages. Warning: this does not check
69 for symlinked packages.""",
70 )
71 p.add_argument(
72 '-s', '--source-cache',
73 action='store_true',
74 help="""Remove files from the source cache of conda build.""",
75 )
76 p.set_defaults(func=execute)
77
78
79 def find_tarballs():
80 from ..core.package_cache import PackageCache
81 pkgs_dirs = defaultdict(list)
82 totalsize = 0
83 part_ext = CONDA_TARBALL_EXTENSION + '.part'
84 for package_cache in PackageCache.all_writable(context.pkgs_dirs):
85 pkgs_dir = package_cache.pkgs_dir
86 if not isdir(pkgs_dir):
87 continue
88 root, _, filenames = next(os.walk(pkgs_dir))
89 for fn in filenames:
90 if fn.endswith(CONDA_TARBALL_EXTENSION) or fn.endswith(part_ext):
91 pkgs_dirs[pkgs_dir].append(fn)
92 totalsize += getsize(join(root, fn))
93
94 return pkgs_dirs, totalsize
95
96
97 def rm_tarballs(args, pkgs_dirs, totalsize, verbose=True):
98 if verbose:
99 for pkgs_dir in pkgs_dirs:
100 print('Cache location: %s' % pkgs_dir)
101
102 if not any(pkgs_dirs[i] for i in pkgs_dirs):
103 if verbose:
104 print("There are no tarballs to remove")
105 return
106
107 if verbose:
108 print("Will remove the following tarballs:")
109 print()
110
111 for pkgs_dir in pkgs_dirs:
112 print(pkgs_dir)
113 print('-'*len(pkgs_dir))
114 fmt = "%-40s %10s"
115 for fn in pkgs_dirs[pkgs_dir]:
116 size = getsize(join(pkgs_dir, fn))
117 print(fmt % (fn, human_bytes(size)))
118 print()
119 print('-' * 51) # From 40 + 1 + 10 in fmt
120 print(fmt % ('Total:', human_bytes(totalsize)))
121 print()
122
123 if not context.json:
124 confirm_yn(args)
125 if context.json and args.dry_run:
126 return
127
128 for pkgs_dir in pkgs_dirs:
129 for fn in pkgs_dirs[pkgs_dir]:
130 try:
131 if rm_rf(os.path.join(pkgs_dir, fn)):
132 if verbose:
133 print("Removed %s" % fn)
134 else:
135 if verbose:
136 print("WARNING: cannot remove, file permissions: %s" % fn)
137 except (IOError, OSError) as e:
138 if verbose:
139 print("WARNING: cannot remove, file permissions: %s\n%r" % (fn, e))
140 else:
141 log.info("%r", e)
142
143
144 def find_pkgs():
145 # TODO: This doesn't handle packages that have hard links to files within
146 # themselves, like bin/python3.3 and bin/python3.3m in the Python package
147 warnings = []
148
149 cross_platform_st_nlink = CrossPlatformStLink()
150 pkgs_dirs = defaultdict(list)
151 for pkgs_dir in context.pkgs_dirs:
152 if not os.path.exists(pkgs_dir):
153 print("WARNING: {0} does not exist".format(pkgs_dir))
154 continue
155 pkgs = [i for i in listdir(pkgs_dir)
156 if (isdir(join(pkgs_dir, i)) and # only include actual packages
157 isdir(join(pkgs_dir, i, 'info')))]
158 for pkg in pkgs:
159 breakit = False
160 for root, dir, files in walk(join(pkgs_dir, pkg)):
161 for fn in files:
162 try:
163 st_nlink = cross_platform_st_nlink(join(root, fn))
164 except OSError as e:
165 warnings.append((fn, e))
166 continue
167 if st_nlink > 1:
168 # print('%s is installed: %s' % (pkg, join(root, fn)))
169 breakit = True
170 break
171
172 if breakit:
173 break
174 else:
175 pkgs_dirs[pkgs_dir].append(pkg)
176
177 totalsize = 0
178 pkgsizes = defaultdict(list)
179 for pkgs_dir in pkgs_dirs:
180 for pkg in pkgs_dirs[pkgs_dir]:
181 pkgsize = 0
182 for root, dir, files in walk(join(pkgs_dir, pkg)):
183 for fn in files:
184 # We don't have to worry about counting things twice: by
185 # definition these files all have a link count of 1!
186 size = lstat(join(root, fn)).st_size
187 totalsize += size
188 pkgsize += size
189 pkgsizes[pkgs_dir].append(pkgsize)
190
191 return pkgs_dirs, warnings, totalsize, pkgsizes
192
193
194 def rm_pkgs(args, pkgs_dirs, warnings, totalsize, pkgsizes,
195 verbose=True):
196 if verbose:
197 for pkgs_dir in pkgs_dirs:
198 print('Cache location: %s' % pkgs_dir)
199 for fn, exception in warnings:
200 print(exception)
201
202 if not any(pkgs_dirs[i] for i in pkgs_dirs):
203 if verbose:
204 print("There are no unused packages to remove")
205 return
206
207 if verbose:
208 print("Will remove the following packages:")
209 for pkgs_dir in pkgs_dirs:
210 print(pkgs_dir)
211 print('-' * len(pkgs_dir))
212 print()
213 fmt = "%-40s %10s"
214 for pkg, pkgsize in zip(pkgs_dirs[pkgs_dir], pkgsizes[pkgs_dir]):
215 print(fmt % (pkg, human_bytes(pkgsize)))
216 print()
217 print('-' * 51) # 40 + 1 + 10 in fmt
218 print(fmt % ('Total:', human_bytes(totalsize)))
219 print()
220
221 if not context.json:
222 confirm_yn(args)
223 if context.json and args.dry_run:
224 return
225
226 for pkgs_dir in pkgs_dirs:
227 for pkg in pkgs_dirs[pkgs_dir]:
228 if verbose:
229 print("removing %s" % pkg)
230 rm_rf(join(pkgs_dir, pkg))
231
232
233 def rm_index_cache():
234 from ..gateways.disk.delete import rm_rf
235 from ..core.package_cache import PackageCache
236 for package_cache in PackageCache.all_writable():
237 rm_rf(join(package_cache.pkgs_dir, 'cache'))
238
239
240 def find_source_cache():
241 cache_dirs = {
242 'source cache': context.src_cache,
243 'git cache': context.git_cache,
244 'hg cache': context.hg_cache,
245 'svn cache': context.svn_cache,
246 }
247
248 sizes = {}
249 totalsize = 0
250 for cache_type, cache_dir in cache_dirs.items():
251 dirsize = 0
252 for root, d, files in walk(cache_dir):
253 for fn in files:
254 size = lstat(join(root, fn)).st_size
255 totalsize += size
256 dirsize += size
257 sizes[cache_type] = dirsize
258
259 return {
260 'warnings': [],
261 'cache_dirs': cache_dirs,
262 'cache_sizes': sizes,
263 'total_size': totalsize,
264 }
265
266
267 def rm_source_cache(args, cache_dirs, warnings, cache_sizes, total_size):
268 verbose = not context.json
269 if warnings:
270 if verbose:
271 for warning in warnings:
272 print(warning, file=sys.stderr)
273 return
274
275 for cache_type in cache_dirs:
276 print("%s (%s)" % (cache_type, cache_dirs[cache_type]))
277 print("%-40s %10s" % ("Size:", human_bytes(cache_sizes[cache_type])))
278 print()
279
280 print("%-40s %10s" % ("Total:", human_bytes(total_size)))
281
282 if not context.json:
283 confirm_yn(args)
284 if context.json and args.dry_run:
285 return
286
287 for dir in cache_dirs.values():
288 print("Removing %s" % dir)
289 rm_rf(dir)
290
291
292 def execute(args, parser):
293 json_result = {
294 'success': True
295 }
296
297 if args.tarballs or args.all:
298 pkgs_dirs, totalsize = find_tarballs()
299 first = sorted(pkgs_dirs)[0] if pkgs_dirs else ''
300 json_result['tarballs'] = {
301 'pkgs_dir': first, # Backwards compabitility
302 'pkgs_dirs': dict(pkgs_dirs),
303 'files': pkgs_dirs[first], # Backwards compatibility
304 'total_size': totalsize
305 }
306 rm_tarballs(args, pkgs_dirs, totalsize, verbose=not context.json)
307
308 if args.index_cache or args.all:
309 json_result['index_cache'] = {
310 'files': [join(context.pkgs_dirs[0], 'cache')]
311 }
312 rm_index_cache()
313
314 if args.packages or args.all:
315 pkgs_dirs, warnings, totalsize, pkgsizes = find_pkgs()
316 first = sorted(pkgs_dirs)[0] if pkgs_dirs else ''
317 json_result['packages'] = {
318 'pkgs_dir': first, # Backwards compatibility
319 'pkgs_dirs': dict(pkgs_dirs),
320 'files': pkgs_dirs[first], # Backwards compatibility
321 'total_size': totalsize,
322 'warnings': warnings,
323 'pkg_sizes': {i: dict(zip(pkgs_dirs[i], pkgsizes[i])) for i in pkgs_dirs},
324 }
325 rm_pkgs(args, pkgs_dirs, warnings, totalsize, pkgsizes,
326 verbose=not context.json)
327
328 if args.source_cache or args.all:
329 json_result['source_cache'] = find_source_cache()
330 rm_source_cache(args, **json_result['source_cache'])
331
332 if not any((args.lock, args.tarballs, args.index_cache, args.packages,
333 args.source_cache, args.all)):
334 raise ArgumentError("One of {--lock, --tarballs, --index-cache, --packages, "
335 "--source-cache, --all} required")
336
337 if context.json:
338 stdout_json(json_result)
339
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conda/cli/main_clean.py b/conda/cli/main_clean.py
--- a/conda/cli/main_clean.py
+++ b/conda/cli/main_clean.py
@@ -120,7 +120,7 @@
print(fmt % ('Total:', human_bytes(totalsize)))
print()
- if not context.json:
+ if not context.json or not context.yes:
confirm_yn(args)
if context.json and args.dry_run:
return
@@ -218,7 +218,7 @@
print(fmt % ('Total:', human_bytes(totalsize)))
print()
- if not context.json:
+ if not context.json or not context.yes:
confirm_yn(args)
if context.json and args.dry_run:
return
@@ -279,7 +279,7 @@
print("%-40s %10s" % ("Total:", human_bytes(total_size)))
- if not context.json:
+ if not context.json or not context.yes:
confirm_yn(args)
if context.json and args.dry_run:
return
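
A quick way to read the patched guard, with hypothetical booleans standing in for `context.json` and `context.yes`:

```python
# Truth check of the condition the patch places in front of confirm_yn(args).
for json_flag in (False, True):
    for yes_flag in (False, True):
        calls_confirm = (not json_flag) or (not yes_flag)
        print(f"json={json_flag} yes={yes_flag} -> confirm_yn called: {calls_confirm}")
# Only json=True together with yes=True skips the call entirely; with --yes alone
# the call still happens, so confirm_yn itself is presumably what honors the flag.
```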
| {"golden_diff": "diff --git a/conda/cli/main_clean.py b/conda/cli/main_clean.py\n--- a/conda/cli/main_clean.py\n+++ b/conda/cli/main_clean.py\n@@ -120,7 +120,7 @@\n print(fmt % ('Total:', human_bytes(totalsize)))\n print()\n \n- if not context.json:\n+ if not context.json or not context.yes:\n confirm_yn(args)\n if context.json and args.dry_run:\n return\n@@ -218,7 +218,7 @@\n print(fmt % ('Total:', human_bytes(totalsize)))\n print()\n \n- if not context.json:\n+ if not context.json or not context.yes:\n confirm_yn(args)\n if context.json and args.dry_run:\n return\n@@ -279,7 +279,7 @@\n \n print(\"%-40s %10s\" % (\"Total:\", human_bytes(total_size)))\n \n- if not context.json:\n+ if not context.json or not context.yes:\n confirm_yn(args)\n if context.json and args.dry_run:\n return\n", "issue": "--yes not functioning for \"Clean\"\nHi, either `conda clean -y --all` or `conda clean --yes --all` still prompt for a confirmation. \r\n\r\nconda 4.2.13\r\nmacOS 10.12.4\n", "before_files": [{"content": "# (c) 2012-2016 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom collections import defaultdict\nfrom logging import getLogger\nimport os\nfrom os import listdir, lstat, walk\nfrom os.path import getsize, isdir, join\nimport sys\n\nfrom .common import add_parser_json, add_parser_yes, confirm_yn, stdout_json\nfrom ..base.constants import CONDA_TARBALL_EXTENSION\nfrom ..base.context import context\nfrom ..exceptions import ArgumentError\nfrom ..gateways.disk.delete import rm_rf\nfrom ..gateways.disk.link import CrossPlatformStLink\nfrom ..utils import human_bytes\n\nlog = getLogger(__name__)\n\ndescr = \"\"\"\nRemove unused packages and caches.\n\"\"\"\n\nexample = \"\"\"\nExamples:\n\n conda clean --tarballs\n\"\"\"\n\ndef configure_parser(sub_parsers):\n p = sub_parsers.add_parser(\n 'clean',\n description=descr,\n help=descr,\n epilog=example,\n )\n add_parser_yes(p)\n add_parser_json(p)\n p.add_argument(\n \"-a\", \"--all\",\n action=\"store_true\",\n help=\"Remove index cache, lock files, tarballs, \"\n \"unused cache packages, and source cache.\",\n )\n p.add_argument(\n \"-i\", \"--index-cache\",\n action=\"store_true\",\n help=\"Remove index cache.\",\n )\n p.add_argument(\n \"-l\", \"--lock\",\n action=\"store_true\",\n help=\"Remove all conda lock files.\",\n )\n p.add_argument(\n \"-t\", \"--tarballs\",\n action=\"store_true\",\n help=\"Remove cached package tarballs.\",\n )\n p.add_argument(\n '-p', '--packages',\n action='store_true',\n help=\"\"\"Remove unused cached packages. 
Warning: this does not check\n for symlinked packages.\"\"\",\n )\n p.add_argument(\n '-s', '--source-cache',\n action='store_true',\n help=\"\"\"Remove files from the source cache of conda build.\"\"\",\n )\n p.set_defaults(func=execute)\n\n\ndef find_tarballs():\n from ..core.package_cache import PackageCache\n pkgs_dirs = defaultdict(list)\n totalsize = 0\n part_ext = CONDA_TARBALL_EXTENSION + '.part'\n for package_cache in PackageCache.all_writable(context.pkgs_dirs):\n pkgs_dir = package_cache.pkgs_dir\n if not isdir(pkgs_dir):\n continue\n root, _, filenames = next(os.walk(pkgs_dir))\n for fn in filenames:\n if fn.endswith(CONDA_TARBALL_EXTENSION) or fn.endswith(part_ext):\n pkgs_dirs[pkgs_dir].append(fn)\n totalsize += getsize(join(root, fn))\n\n return pkgs_dirs, totalsize\n\n\ndef rm_tarballs(args, pkgs_dirs, totalsize, verbose=True):\n if verbose:\n for pkgs_dir in pkgs_dirs:\n print('Cache location: %s' % pkgs_dir)\n\n if not any(pkgs_dirs[i] for i in pkgs_dirs):\n if verbose:\n print(\"There are no tarballs to remove\")\n return\n\n if verbose:\n print(\"Will remove the following tarballs:\")\n print()\n\n for pkgs_dir in pkgs_dirs:\n print(pkgs_dir)\n print('-'*len(pkgs_dir))\n fmt = \"%-40s %10s\"\n for fn in pkgs_dirs[pkgs_dir]:\n size = getsize(join(pkgs_dir, fn))\n print(fmt % (fn, human_bytes(size)))\n print()\n print('-' * 51) # From 40 + 1 + 10 in fmt\n print(fmt % ('Total:', human_bytes(totalsize)))\n print()\n\n if not context.json:\n confirm_yn(args)\n if context.json and args.dry_run:\n return\n\n for pkgs_dir in pkgs_dirs:\n for fn in pkgs_dirs[pkgs_dir]:\n try:\n if rm_rf(os.path.join(pkgs_dir, fn)):\n if verbose:\n print(\"Removed %s\" % fn)\n else:\n if verbose:\n print(\"WARNING: cannot remove, file permissions: %s\" % fn)\n except (IOError, OSError) as e:\n if verbose:\n print(\"WARNING: cannot remove, file permissions: %s\\n%r\" % (fn, e))\n else:\n log.info(\"%r\", e)\n\n\ndef find_pkgs():\n # TODO: This doesn't handle packages that have hard links to files within\n # themselves, like bin/python3.3 and bin/python3.3m in the Python package\n warnings = []\n\n cross_platform_st_nlink = CrossPlatformStLink()\n pkgs_dirs = defaultdict(list)\n for pkgs_dir in context.pkgs_dirs:\n if not os.path.exists(pkgs_dir):\n print(\"WARNING: {0} does not exist\".format(pkgs_dir))\n continue\n pkgs = [i for i in listdir(pkgs_dir)\n if (isdir(join(pkgs_dir, i)) and # only include actual packages\n isdir(join(pkgs_dir, i, 'info')))]\n for pkg in pkgs:\n breakit = False\n for root, dir, files in walk(join(pkgs_dir, pkg)):\n for fn in files:\n try:\n st_nlink = cross_platform_st_nlink(join(root, fn))\n except OSError as e:\n warnings.append((fn, e))\n continue\n if st_nlink > 1:\n # print('%s is installed: %s' % (pkg, join(root, fn)))\n breakit = True\n break\n\n if breakit:\n break\n else:\n pkgs_dirs[pkgs_dir].append(pkg)\n\n totalsize = 0\n pkgsizes = defaultdict(list)\n for pkgs_dir in pkgs_dirs:\n for pkg in pkgs_dirs[pkgs_dir]:\n pkgsize = 0\n for root, dir, files in walk(join(pkgs_dir, pkg)):\n for fn in files:\n # We don't have to worry about counting things twice: by\n # definition these files all have a link count of 1!\n size = lstat(join(root, fn)).st_size\n totalsize += size\n pkgsize += size\n pkgsizes[pkgs_dir].append(pkgsize)\n\n return pkgs_dirs, warnings, totalsize, pkgsizes\n\n\ndef rm_pkgs(args, pkgs_dirs, warnings, totalsize, pkgsizes,\n verbose=True):\n if verbose:\n for pkgs_dir in pkgs_dirs:\n print('Cache location: %s' % pkgs_dir)\n for fn, exception in 
warnings:\n print(exception)\n\n if not any(pkgs_dirs[i] for i in pkgs_dirs):\n if verbose:\n print(\"There are no unused packages to remove\")\n return\n\n if verbose:\n print(\"Will remove the following packages:\")\n for pkgs_dir in pkgs_dirs:\n print(pkgs_dir)\n print('-' * len(pkgs_dir))\n print()\n fmt = \"%-40s %10s\"\n for pkg, pkgsize in zip(pkgs_dirs[pkgs_dir], pkgsizes[pkgs_dir]):\n print(fmt % (pkg, human_bytes(pkgsize)))\n print()\n print('-' * 51) # 40 + 1 + 10 in fmt\n print(fmt % ('Total:', human_bytes(totalsize)))\n print()\n\n if not context.json:\n confirm_yn(args)\n if context.json and args.dry_run:\n return\n\n for pkgs_dir in pkgs_dirs:\n for pkg in pkgs_dirs[pkgs_dir]:\n if verbose:\n print(\"removing %s\" % pkg)\n rm_rf(join(pkgs_dir, pkg))\n\n\ndef rm_index_cache():\n from ..gateways.disk.delete import rm_rf\n from ..core.package_cache import PackageCache\n for package_cache in PackageCache.all_writable():\n rm_rf(join(package_cache.pkgs_dir, 'cache'))\n\n\ndef find_source_cache():\n cache_dirs = {\n 'source cache': context.src_cache,\n 'git cache': context.git_cache,\n 'hg cache': context.hg_cache,\n 'svn cache': context.svn_cache,\n }\n\n sizes = {}\n totalsize = 0\n for cache_type, cache_dir in cache_dirs.items():\n dirsize = 0\n for root, d, files in walk(cache_dir):\n for fn in files:\n size = lstat(join(root, fn)).st_size\n totalsize += size\n dirsize += size\n sizes[cache_type] = dirsize\n\n return {\n 'warnings': [],\n 'cache_dirs': cache_dirs,\n 'cache_sizes': sizes,\n 'total_size': totalsize,\n }\n\n\ndef rm_source_cache(args, cache_dirs, warnings, cache_sizes, total_size):\n verbose = not context.json\n if warnings:\n if verbose:\n for warning in warnings:\n print(warning, file=sys.stderr)\n return\n\n for cache_type in cache_dirs:\n print(\"%s (%s)\" % (cache_type, cache_dirs[cache_type]))\n print(\"%-40s %10s\" % (\"Size:\", human_bytes(cache_sizes[cache_type])))\n print()\n\n print(\"%-40s %10s\" % (\"Total:\", human_bytes(total_size)))\n\n if not context.json:\n confirm_yn(args)\n if context.json and args.dry_run:\n return\n\n for dir in cache_dirs.values():\n print(\"Removing %s\" % dir)\n rm_rf(dir)\n\n\ndef execute(args, parser):\n json_result = {\n 'success': True\n }\n\n if args.tarballs or args.all:\n pkgs_dirs, totalsize = find_tarballs()\n first = sorted(pkgs_dirs)[0] if pkgs_dirs else ''\n json_result['tarballs'] = {\n 'pkgs_dir': first, # Backwards compabitility\n 'pkgs_dirs': dict(pkgs_dirs),\n 'files': pkgs_dirs[first], # Backwards compatibility\n 'total_size': totalsize\n }\n rm_tarballs(args, pkgs_dirs, totalsize, verbose=not context.json)\n\n if args.index_cache or args.all:\n json_result['index_cache'] = {\n 'files': [join(context.pkgs_dirs[0], 'cache')]\n }\n rm_index_cache()\n\n if args.packages or args.all:\n pkgs_dirs, warnings, totalsize, pkgsizes = find_pkgs()\n first = sorted(pkgs_dirs)[0] if pkgs_dirs else ''\n json_result['packages'] = {\n 'pkgs_dir': first, # Backwards compatibility\n 'pkgs_dirs': dict(pkgs_dirs),\n 'files': pkgs_dirs[first], # Backwards compatibility\n 'total_size': totalsize,\n 'warnings': warnings,\n 'pkg_sizes': {i: dict(zip(pkgs_dirs[i], pkgsizes[i])) for i in pkgs_dirs},\n }\n rm_pkgs(args, pkgs_dirs, warnings, totalsize, pkgsizes,\n verbose=not context.json)\n\n if args.source_cache or args.all:\n json_result['source_cache'] = find_source_cache()\n rm_source_cache(args, **json_result['source_cache'])\n\n if not any((args.lock, args.tarballs, args.index_cache, args.packages,\n args.source_cache, 
args.all)):\n raise ArgumentError(\"One of {--lock, --tarballs, --index-cache, --packages, \"\n \"--source-cache, --all} required\")\n\n if context.json:\n stdout_json(json_result)\n", "path": "conda/cli/main_clean.py"}], "after_files": [{"content": "# (c) 2012-2016 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom collections import defaultdict\nfrom logging import getLogger\nimport os\nfrom os import listdir, lstat, walk\nfrom os.path import getsize, isdir, join\nimport sys\n\nfrom .common import add_parser_json, add_parser_yes, confirm_yn, stdout_json\nfrom ..base.constants import CONDA_TARBALL_EXTENSION\nfrom ..base.context import context\nfrom ..exceptions import ArgumentError\nfrom ..gateways.disk.delete import rm_rf\nfrom ..gateways.disk.link import CrossPlatformStLink\nfrom ..utils import human_bytes\n\nlog = getLogger(__name__)\n\ndescr = \"\"\"\nRemove unused packages and caches.\n\"\"\"\n\nexample = \"\"\"\nExamples:\n\n conda clean --tarballs\n\"\"\"\n\ndef configure_parser(sub_parsers):\n p = sub_parsers.add_parser(\n 'clean',\n description=descr,\n help=descr,\n epilog=example,\n )\n add_parser_yes(p)\n add_parser_json(p)\n p.add_argument(\n \"-a\", \"--all\",\n action=\"store_true\",\n help=\"Remove index cache, lock files, tarballs, \"\n \"unused cache packages, and source cache.\",\n )\n p.add_argument(\n \"-i\", \"--index-cache\",\n action=\"store_true\",\n help=\"Remove index cache.\",\n )\n p.add_argument(\n \"-l\", \"--lock\",\n action=\"store_true\",\n help=\"Remove all conda lock files.\",\n )\n p.add_argument(\n \"-t\", \"--tarballs\",\n action=\"store_true\",\n help=\"Remove cached package tarballs.\",\n )\n p.add_argument(\n '-p', '--packages',\n action='store_true',\n help=\"\"\"Remove unused cached packages. 
Warning: this does not check\n for symlinked packages.\"\"\",\n )\n p.add_argument(\n '-s', '--source-cache',\n action='store_true',\n help=\"\"\"Remove files from the source cache of conda build.\"\"\",\n )\n p.set_defaults(func=execute)\n\n\ndef find_tarballs():\n from ..core.package_cache import PackageCache\n pkgs_dirs = defaultdict(list)\n totalsize = 0\n part_ext = CONDA_TARBALL_EXTENSION + '.part'\n for package_cache in PackageCache.all_writable(context.pkgs_dirs):\n pkgs_dir = package_cache.pkgs_dir\n if not isdir(pkgs_dir):\n continue\n root, _, filenames = next(os.walk(pkgs_dir))\n for fn in filenames:\n if fn.endswith(CONDA_TARBALL_EXTENSION) or fn.endswith(part_ext):\n pkgs_dirs[pkgs_dir].append(fn)\n totalsize += getsize(join(root, fn))\n\n return pkgs_dirs, totalsize\n\n\ndef rm_tarballs(args, pkgs_dirs, totalsize, verbose=True):\n if verbose:\n for pkgs_dir in pkgs_dirs:\n print('Cache location: %s' % pkgs_dir)\n\n if not any(pkgs_dirs[i] for i in pkgs_dirs):\n if verbose:\n print(\"There are no tarballs to remove\")\n return\n\n if verbose:\n print(\"Will remove the following tarballs:\")\n print()\n\n for pkgs_dir in pkgs_dirs:\n print(pkgs_dir)\n print('-'*len(pkgs_dir))\n fmt = \"%-40s %10s\"\n for fn in pkgs_dirs[pkgs_dir]:\n size = getsize(join(pkgs_dir, fn))\n print(fmt % (fn, human_bytes(size)))\n print()\n print('-' * 51) # From 40 + 1 + 10 in fmt\n print(fmt % ('Total:', human_bytes(totalsize)))\n print()\n\n if not context.json or not context.yes:\n confirm_yn(args)\n if context.json and args.dry_run:\n return\n\n for pkgs_dir in pkgs_dirs:\n for fn in pkgs_dirs[pkgs_dir]:\n try:\n if rm_rf(os.path.join(pkgs_dir, fn)):\n if verbose:\n print(\"Removed %s\" % fn)\n else:\n if verbose:\n print(\"WARNING: cannot remove, file permissions: %s\" % fn)\n except (IOError, OSError) as e:\n if verbose:\n print(\"WARNING: cannot remove, file permissions: %s\\n%r\" % (fn, e))\n else:\n log.info(\"%r\", e)\n\n\ndef find_pkgs():\n # TODO: This doesn't handle packages that have hard links to files within\n # themselves, like bin/python3.3 and bin/python3.3m in the Python package\n warnings = []\n\n cross_platform_st_nlink = CrossPlatformStLink()\n pkgs_dirs = defaultdict(list)\n for pkgs_dir in context.pkgs_dirs:\n if not os.path.exists(pkgs_dir):\n print(\"WARNING: {0} does not exist\".format(pkgs_dir))\n continue\n pkgs = [i for i in listdir(pkgs_dir)\n if (isdir(join(pkgs_dir, i)) and # only include actual packages\n isdir(join(pkgs_dir, i, 'info')))]\n for pkg in pkgs:\n breakit = False\n for root, dir, files in walk(join(pkgs_dir, pkg)):\n for fn in files:\n try:\n st_nlink = cross_platform_st_nlink(join(root, fn))\n except OSError as e:\n warnings.append((fn, e))\n continue\n if st_nlink > 1:\n # print('%s is installed: %s' % (pkg, join(root, fn)))\n breakit = True\n break\n\n if breakit:\n break\n else:\n pkgs_dirs[pkgs_dir].append(pkg)\n\n totalsize = 0\n pkgsizes = defaultdict(list)\n for pkgs_dir in pkgs_dirs:\n for pkg in pkgs_dirs[pkgs_dir]:\n pkgsize = 0\n for root, dir, files in walk(join(pkgs_dir, pkg)):\n for fn in files:\n # We don't have to worry about counting things twice: by\n # definition these files all have a link count of 1!\n size = lstat(join(root, fn)).st_size\n totalsize += size\n pkgsize += size\n pkgsizes[pkgs_dir].append(pkgsize)\n\n return pkgs_dirs, warnings, totalsize, pkgsizes\n\n\ndef rm_pkgs(args, pkgs_dirs, warnings, totalsize, pkgsizes,\n verbose=True):\n if verbose:\n for pkgs_dir in pkgs_dirs:\n print('Cache location: %s' % pkgs_dir)\n 
for fn, exception in warnings:\n print(exception)\n\n if not any(pkgs_dirs[i] for i in pkgs_dirs):\n if verbose:\n print(\"There are no unused packages to remove\")\n return\n\n if verbose:\n print(\"Will remove the following packages:\")\n for pkgs_dir in pkgs_dirs:\n print(pkgs_dir)\n print('-' * len(pkgs_dir))\n print()\n fmt = \"%-40s %10s\"\n for pkg, pkgsize in zip(pkgs_dirs[pkgs_dir], pkgsizes[pkgs_dir]):\n print(fmt % (pkg, human_bytes(pkgsize)))\n print()\n print('-' * 51) # 40 + 1 + 10 in fmt\n print(fmt % ('Total:', human_bytes(totalsize)))\n print()\n\n if not context.json or not context.yes:\n confirm_yn(args)\n if context.json and args.dry_run:\n return\n\n for pkgs_dir in pkgs_dirs:\n for pkg in pkgs_dirs[pkgs_dir]:\n if verbose:\n print(\"removing %s\" % pkg)\n rm_rf(join(pkgs_dir, pkg))\n\n\ndef rm_index_cache():\n from ..gateways.disk.delete import rm_rf\n from ..core.package_cache import PackageCache\n for package_cache in PackageCache.all_writable():\n rm_rf(join(package_cache.pkgs_dir, 'cache'))\n\n\ndef find_source_cache():\n cache_dirs = {\n 'source cache': context.src_cache,\n 'git cache': context.git_cache,\n 'hg cache': context.hg_cache,\n 'svn cache': context.svn_cache,\n }\n\n sizes = {}\n totalsize = 0\n for cache_type, cache_dir in cache_dirs.items():\n dirsize = 0\n for root, d, files in walk(cache_dir):\n for fn in files:\n size = lstat(join(root, fn)).st_size\n totalsize += size\n dirsize += size\n sizes[cache_type] = dirsize\n\n return {\n 'warnings': [],\n 'cache_dirs': cache_dirs,\n 'cache_sizes': sizes,\n 'total_size': totalsize,\n }\n\n\ndef rm_source_cache(args, cache_dirs, warnings, cache_sizes, total_size):\n verbose = not context.json\n if warnings:\n if verbose:\n for warning in warnings:\n print(warning, file=sys.stderr)\n return\n\n for cache_type in cache_dirs:\n print(\"%s (%s)\" % (cache_type, cache_dirs[cache_type]))\n print(\"%-40s %10s\" % (\"Size:\", human_bytes(cache_sizes[cache_type])))\n print()\n\n print(\"%-40s %10s\" % (\"Total:\", human_bytes(total_size)))\n\n if not context.json or not context.yes:\n confirm_yn(args)\n if context.json and args.dry_run:\n return\n\n for dir in cache_dirs.values():\n print(\"Removing %s\" % dir)\n rm_rf(dir)\n\n\ndef execute(args, parser):\n json_result = {\n 'success': True\n }\n\n if args.tarballs or args.all:\n pkgs_dirs, totalsize = find_tarballs()\n first = sorted(pkgs_dirs)[0] if pkgs_dirs else ''\n json_result['tarballs'] = {\n 'pkgs_dir': first, # Backwards compabitility\n 'pkgs_dirs': dict(pkgs_dirs),\n 'files': pkgs_dirs[first], # Backwards compatibility\n 'total_size': totalsize\n }\n rm_tarballs(args, pkgs_dirs, totalsize, verbose=not context.json)\n\n if args.index_cache or args.all:\n json_result['index_cache'] = {\n 'files': [join(context.pkgs_dirs[0], 'cache')]\n }\n rm_index_cache()\n\n if args.packages or args.all:\n pkgs_dirs, warnings, totalsize, pkgsizes = find_pkgs()\n first = sorted(pkgs_dirs)[0] if pkgs_dirs else ''\n json_result['packages'] = {\n 'pkgs_dir': first, # Backwards compatibility\n 'pkgs_dirs': dict(pkgs_dirs),\n 'files': pkgs_dirs[first], # Backwards compatibility\n 'total_size': totalsize,\n 'warnings': warnings,\n 'pkg_sizes': {i: dict(zip(pkgs_dirs[i], pkgsizes[i])) for i in pkgs_dirs},\n }\n rm_pkgs(args, pkgs_dirs, warnings, totalsize, pkgsizes,\n verbose=not context.json)\n\n if args.source_cache or args.all:\n json_result['source_cache'] = find_source_cache()\n rm_source_cache(args, **json_result['source_cache'])\n\n if not any((args.lock, args.tarballs, 
args.index_cache, args.packages,\n args.source_cache, args.all)):\n raise ArgumentError(\"One of {--lock, --tarballs, --index-cache, --packages, \"\n \"--source-cache, --all} required\")\n\n if context.json:\n stdout_json(json_result)\n", "path": "conda/cli/main_clean.py"}]} | 3,850 | 251 |
gh_patches_debug_29965 | rasdani/github-patches | git_diff | openai__gym-1549 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
VectorEnv seems much slower than SubprocVecEnv
The following test script shows a large performance difference between SubprocVecEnv and VectorEnv:
```
elapsed_vectorenv 24.20034408569336
elapsed_subprocvecenv 0.813650369644165
```
```
from gym.vector import make
from baselines.common.vec_env import SubprocVecEnv
import numpy as np
import time
import gym
NUM_STEPS = 1000
def run_vectorenv():
venv = make("PongNoFrameskip-v4", num_envs=3)
venv.reset()
start = time.time()
for _ in range(NUM_STEPS):
obs, rews, dones, infos = venv.step(venv.action_space.sample())
elapsed = time.time() - start
venv.close()
return elapsed
def run_subprocvecenv():
def make_env():
return gym.make("PongNoFrameskip-v4")
venv = SubprocVecEnv([make_env] * 3)
venv.reset()
start = time.time()
for _ in range(NUM_STEPS):
obs, rews, dones, infos = venv.step(np.array([venv.action_space.sample() for _ in range(venv.num_envs)]))
elapsed = time.time() - start
venv.close()
return elapsed
def main():
elapsed_vectorenv = run_vectorenv()
elapsed_subprocvecenv = run_subprocvecenv()
print("elapsed_vectorenv", elapsed_vectorenv)
print("elapsed_subprocvecenv", elapsed_subprocvecenv)

if __name__ == "__main__":
    main()
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gym/vector/utils/shared_memory.py`
Content:
```
1 import numpy as np
2 from multiprocessing import Array
3 from ctypes import c_bool
4 from collections import OrderedDict
5
6 from gym import logger
7 from gym.spaces import Tuple, Dict
8 from gym.vector.utils.spaces import _BaseGymSpaces
9
10 __all__ = [
11 'create_shared_memory',
12 'read_from_shared_memory',
13 'write_to_shared_memory'
14 ]
15
16 def create_shared_memory(space, n=1):
17 """Create a shared memory object, to be shared across processes. This
18 eventually contains the observations from the vectorized environment.
19
20 Parameters
21 ----------
22 space : `gym.spaces.Space` instance
23 Observation space of a single environment in the vectorized environment.
24
25 n : int
26 Number of environments in the vectorized environment (i.e. the number
27 of processes).
28
29 Returns
30 -------
31 shared_memory : dict, tuple, or `multiprocessing.Array` instance
32 Shared object across processes.
33 """
34 if isinstance(space, _BaseGymSpaces):
35 return create_base_shared_memory(space, n=n)
36 elif isinstance(space, Tuple):
37 return create_tuple_shared_memory(space, n=n)
38 elif isinstance(space, Dict):
39 return create_dict_shared_memory(space, n=n)
40 else:
41 raise NotImplementedError()
42
43 def create_base_shared_memory(space, n=1):
44 dtype = space.dtype.char
45 if dtype in '?':
46 dtype = c_bool
47 return Array(dtype, n * int(np.prod(space.shape)))
48
49 def create_tuple_shared_memory(space, n=1):
50 return tuple(create_shared_memory(subspace, n=n)
51 for subspace in space.spaces)
52
53 def create_dict_shared_memory(space, n=1):
54 return OrderedDict([(key, create_shared_memory(subspace, n=n))
55 for (key, subspace) in space.spaces.items()])
56
57
58 def read_from_shared_memory(shared_memory, space, n=1):
59 """Read the batch of observations from shared memory as a numpy array.
60
61 Parameters
62 ----------
63 shared_memory : dict, tuple, or `multiprocessing.Array` instance
64 Shared object across processes. This contains the observations from the
65 vectorized environment. This object is created with `create_shared_memory`.
66
67 space : `gym.spaces.Space` instance
68 Observation space of a single environment in the vectorized environment.
69
70 n : int
71 Number of environments in the vectorized environment (i.e. the number
72 of processes).
73
74 Returns
75 -------
76 observations : dict, tuple or `np.ndarray` instance
77 Batch of observations as a (possibly nested) numpy array.
78
79 Notes
80 -----
81 The numpy array objects returned by `read_from_shared_memory` shares the
82 memory of `shared_memory`. Any changes to `shared_memory` are forwarded
83 to `observations`, and vice-versa. To avoid any side-effect, use `np.copy`.
84 """
85 if isinstance(space, _BaseGymSpaces):
86 return read_base_from_shared_memory(shared_memory, space, n=n)
87 elif isinstance(space, Tuple):
88 return read_tuple_from_shared_memory(shared_memory, space, n=n)
89 elif isinstance(space, Dict):
90 return read_dict_from_shared_memory(shared_memory, space, n=n)
91 else:
92 raise NotImplementedError()
93
94 def read_base_from_shared_memory(shared_memory, space, n=1):
95 return np.frombuffer(shared_memory.get_obj(),
96 dtype=space.dtype).reshape((n,) + space.shape)
97
98 def read_tuple_from_shared_memory(shared_memory, space, n=1):
99 return tuple(read_from_shared_memory(memory, subspace, n=n)
100 for (memory, subspace) in zip(shared_memory, space.spaces))
101
102 def read_dict_from_shared_memory(shared_memory, space, n=1):
103 return OrderedDict([(key, read_from_shared_memory(memory, subspace, n=n))
104 for ((key, memory), subspace) in zip(shared_memory.items(),
105 space.spaces.values())])
106
107
108 def write_to_shared_memory(index, value, shared_memory, space):
109 """Write the observation of a single environment into shared memory.
110
111 Parameters
112 ----------
113 index : int
114 Index of the environment (must be in `[0, num_envs)`).
115
116 value : sample from `space`
117 Observation of the single environment to write to shared memory.
118
119 shared_memory : dict, tuple, or `multiprocessing.Array` instance
120 Shared object across processes. This contains the observations from the
121 vectorized environment. This object is created with `create_shared_memory`.
122
123 space : `gym.spaces.Space` instance
124 Observation space of a single environment in the vectorized environment.
125
126 Returns
127 -------
128 `None`
129 """
130 if isinstance(space, _BaseGymSpaces):
131 write_base_to_shared_memory(index, value, shared_memory, space)
132 elif isinstance(space, Tuple):
133 write_tuple_to_shared_memory(index, value, shared_memory, space)
134 elif isinstance(space, Dict):
135 write_dict_to_shared_memory(index, value, shared_memory, space)
136 else:
137 raise NotImplementedError()
138
139 def write_base_to_shared_memory(index, value, shared_memory, space):
140 size = int(np.prod(space.shape))
141 shared_memory[index * size:(index + 1) * size] = np.asarray(value,
142 dtype=space.dtype).flatten()
143
144 def write_tuple_to_shared_memory(index, values, shared_memory, space):
145 for value, memory, subspace in zip(values, shared_memory, space.spaces):
146 write_to_shared_memory(index, value, memory, subspace)
147
148 def write_dict_to_shared_memory(index, values, shared_memory, space):
149 for key, value in values.items():
150 write_to_shared_memory(index, value, shared_memory[key], space.spaces[key])
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gym/vector/utils/shared_memory.py b/gym/vector/utils/shared_memory.py
--- a/gym/vector/utils/shared_memory.py
+++ b/gym/vector/utils/shared_memory.py
@@ -100,9 +100,8 @@
for (memory, subspace) in zip(shared_memory, space.spaces))
def read_dict_from_shared_memory(shared_memory, space, n=1):
- return OrderedDict([(key, read_from_shared_memory(memory, subspace, n=n))
- for ((key, memory), subspace) in zip(shared_memory.items(),
- space.spaces.values())])
+ return OrderedDict([(key, read_from_shared_memory(shared_memory[key],
+ subspace, n=n)) for (key, subspace) in space.spaces.items()])
def write_to_shared_memory(index, value, shared_memory, space):
@@ -138,13 +137,14 @@
def write_base_to_shared_memory(index, value, shared_memory, space):
size = int(np.prod(space.shape))
- shared_memory[index * size:(index + 1) * size] = np.asarray(value,
- dtype=space.dtype).flatten()
+ destination = np.frombuffer(shared_memory.get_obj(), dtype=space.dtype)
+ np.copyto(destination[index * size:(index + 1) * size], np.asarray(
+ value, dtype=space.dtype).flatten())
def write_tuple_to_shared_memory(index, values, shared_memory, space):
for value, memory, subspace in zip(values, shared_memory, space.spaces):
write_to_shared_memory(index, value, memory, subspace)
def write_dict_to_shared_memory(index, values, shared_memory, space):
- for key, value in values.items():
- write_to_shared_memory(index, value, shared_memory[key], space.spaces[key])
+ for key, subspace in space.spaces.items():
+ write_to_shared_memory(index, values[key], shared_memory[key], subspace)
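
A rough sketch of why the `write_base_to_shared_memory` hunk likely matters, using made-up sizes and a float32 buffer rather than anything measured in the report:

```python
import numpy as np
from multiprocessing import Array

n, size = 3, 100_000           # hypothetical num_envs and flattened observation size
shared = Array('f', n * size)  # the kind of object create_base_shared_memory returns
value = np.random.rand(size).astype(np.float32)

# Old path: slice assignment on the synchronized Array converts every element
# through ctypes, one Python-level item at a time.
shared[0:size] = value.flatten()

# Patched path: a single vectorized copy into a NumPy view over the same buffer.
destination = np.frombuffer(shared.get_obj(), dtype=np.float32)
np.copyto(destination[0:size], value.flatten())
```

The Dict hunks make the read and write helpers look up shared memory and values by key instead of zipping two mappings, so they no longer depend on both dictionaries iterating in the same order.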
| {"golden_diff": "diff --git a/gym/vector/utils/shared_memory.py b/gym/vector/utils/shared_memory.py\n--- a/gym/vector/utils/shared_memory.py\n+++ b/gym/vector/utils/shared_memory.py\n@@ -100,9 +100,8 @@\n for (memory, subspace) in zip(shared_memory, space.spaces))\n \n def read_dict_from_shared_memory(shared_memory, space, n=1):\n- return OrderedDict([(key, read_from_shared_memory(memory, subspace, n=n))\n- for ((key, memory), subspace) in zip(shared_memory.items(), \n- space.spaces.values())])\n+ return OrderedDict([(key, read_from_shared_memory(shared_memory[key],\n+ subspace, n=n)) for (key, subspace) in space.spaces.items()])\n \n \n def write_to_shared_memory(index, value, shared_memory, space):\n@@ -138,13 +137,14 @@\n \n def write_base_to_shared_memory(index, value, shared_memory, space):\n size = int(np.prod(space.shape))\n- shared_memory[index * size:(index + 1) * size] = np.asarray(value,\n- dtype=space.dtype).flatten()\n+ destination = np.frombuffer(shared_memory.get_obj(), dtype=space.dtype)\n+ np.copyto(destination[index * size:(index + 1) * size], np.asarray(\n+ value, dtype=space.dtype).flatten())\n \n def write_tuple_to_shared_memory(index, values, shared_memory, space):\n for value, memory, subspace in zip(values, shared_memory, space.spaces):\n write_to_shared_memory(index, value, memory, subspace)\n \n def write_dict_to_shared_memory(index, values, shared_memory, space):\n- for key, value in values.items():\n- write_to_shared_memory(index, value, shared_memory[key], space.spaces[key])\n+ for key, subspace in space.spaces.items():\n+ write_to_shared_memory(index, values[key], shared_memory[key], subspace)\n", "issue": "VectorEnv seems much slower than SubprocVecEnv\nThe following test script shows a large performance difference between SubprocVecEnv and VectorEnv:\r\n\r\n```\r\nelapsed_vectorenv 24.20034408569336\r\nelapsed_subprocvecenv 0.813650369644165\r\n```\r\n\r\n```\r\nfrom gym.vector import make\r\nfrom baselines.common.vec_env import SubprocVecEnv\r\nimport numpy as np\r\nimport time\r\nimport gym\r\n\r\nNUM_STEPS = 1000\r\n\r\ndef run_vectorenv():\r\n venv = make(\"PongNoFrameskip-v4\", num_envs=3)\r\n venv.reset()\r\n start = time.time()\r\n for _ in range(NUM_STEPS):\r\n obs, rews, dones, infos = venv.step(venv.action_space.sample())\r\n elapsed = time.time() - start\r\n venv.close()\r\n return elapsed\r\n\r\n\r\ndef run_subprocvecenv():\r\n def make_env():\r\n return gym.make(\"PongNoFrameskip-v4\")\r\n\r\n venv = SubprocVecEnv([make_env] * 3)\r\n venv.reset()\r\n start = time.time()\r\n for _ in range(NUM_STEPS):\r\n obs, rews, dones, infos = venv.step(np.array([venv.action_space.sample() for _ in range(venv.num_envs)]))\r\n elapsed = time.time() - start\r\n venv.close()\r\n return elapsed\r\n\r\n\r\ndef main():\r\n elapsed_vectorenv = run_vectorenv()\r\n elapsed_subprocvecenv = run_subprocvecenv()\r\n print(\"elapsed_vectorenv\", elapsed_vectorenv)\r\n print(\"elapsed_subprocvecenv\", elapsed_subprocvecenv)\r\n```\n", "before_files": [{"content": "import numpy as np\nfrom multiprocessing import Array\nfrom ctypes import c_bool\nfrom collections import OrderedDict\n\nfrom gym import logger\nfrom gym.spaces import Tuple, Dict\nfrom gym.vector.utils.spaces import _BaseGymSpaces\n\n__all__ = [\n 'create_shared_memory',\n 'read_from_shared_memory',\n 'write_to_shared_memory'\n]\n\ndef create_shared_memory(space, n=1):\n \"\"\"Create a shared memory object, to be shared across processes. 
This\n eventually contains the observations from the vectorized environment.\n\n Parameters\n ----------\n space : `gym.spaces.Space` instance\n Observation space of a single environment in the vectorized environment.\n\n n : int\n Number of environments in the vectorized environment (i.e. the number\n of processes).\n\n Returns\n -------\n shared_memory : dict, tuple, or `multiprocessing.Array` instance\n Shared object across processes.\n \"\"\"\n if isinstance(space, _BaseGymSpaces):\n return create_base_shared_memory(space, n=n)\n elif isinstance(space, Tuple):\n return create_tuple_shared_memory(space, n=n)\n elif isinstance(space, Dict):\n return create_dict_shared_memory(space, n=n)\n else:\n raise NotImplementedError()\n\ndef create_base_shared_memory(space, n=1):\n dtype = space.dtype.char\n if dtype in '?':\n dtype = c_bool\n return Array(dtype, n * int(np.prod(space.shape)))\n\ndef create_tuple_shared_memory(space, n=1):\n return tuple(create_shared_memory(subspace, n=n)\n for subspace in space.spaces)\n\ndef create_dict_shared_memory(space, n=1):\n return OrderedDict([(key, create_shared_memory(subspace, n=n))\n for (key, subspace) in space.spaces.items()])\n\n\ndef read_from_shared_memory(shared_memory, space, n=1):\n \"\"\"Read the batch of observations from shared memory as a numpy array.\n\n Parameters\n ----------\n shared_memory : dict, tuple, or `multiprocessing.Array` instance\n Shared object across processes. This contains the observations from the\n vectorized environment. This object is created with `create_shared_memory`.\n\n space : `gym.spaces.Space` instance\n Observation space of a single environment in the vectorized environment.\n\n n : int\n Number of environments in the vectorized environment (i.e. the number\n of processes).\n\n Returns\n -------\n observations : dict, tuple or `np.ndarray` instance\n Batch of observations as a (possibly nested) numpy array.\n\n Notes\n -----\n The numpy array objects returned by `read_from_shared_memory` shares the\n memory of `shared_memory`. Any changes to `shared_memory` are forwarded\n to `observations`, and vice-versa. To avoid any side-effect, use `np.copy`.\n \"\"\"\n if isinstance(space, _BaseGymSpaces):\n return read_base_from_shared_memory(shared_memory, space, n=n)\n elif isinstance(space, Tuple):\n return read_tuple_from_shared_memory(shared_memory, space, n=n)\n elif isinstance(space, Dict):\n return read_dict_from_shared_memory(shared_memory, space, n=n)\n else:\n raise NotImplementedError()\n\ndef read_base_from_shared_memory(shared_memory, space, n=1):\n return np.frombuffer(shared_memory.get_obj(),\n dtype=space.dtype).reshape((n,) + space.shape)\n\ndef read_tuple_from_shared_memory(shared_memory, space, n=1):\n return tuple(read_from_shared_memory(memory, subspace, n=n)\n for (memory, subspace) in zip(shared_memory, space.spaces))\n\ndef read_dict_from_shared_memory(shared_memory, space, n=1):\n return OrderedDict([(key, read_from_shared_memory(memory, subspace, n=n))\n for ((key, memory), subspace) in zip(shared_memory.items(), \n space.spaces.values())])\n\n\ndef write_to_shared_memory(index, value, shared_memory, space):\n \"\"\"Write the observation of a single environment into shared memory.\n\n Parameters\n ----------\n index : int\n Index of the environment (must be in `[0, num_envs)`).\n\n value : sample from `space`\n Observation of the single environment to write to shared memory.\n\n shared_memory : dict, tuple, or `multiprocessing.Array` instance\n Shared object across processes. 
This contains the observations from the\n vectorized environment. This object is created with `create_shared_memory`.\n\n space : `gym.spaces.Space` instance\n Observation space of a single environment in the vectorized environment.\n\n Returns\n -------\n `None`\n \"\"\"\n if isinstance(space, _BaseGymSpaces):\n write_base_to_shared_memory(index, value, shared_memory, space)\n elif isinstance(space, Tuple):\n write_tuple_to_shared_memory(index, value, shared_memory, space)\n elif isinstance(space, Dict):\n write_dict_to_shared_memory(index, value, shared_memory, space)\n else:\n raise NotImplementedError()\n\ndef write_base_to_shared_memory(index, value, shared_memory, space):\n size = int(np.prod(space.shape))\n shared_memory[index * size:(index + 1) * size] = np.asarray(value,\n dtype=space.dtype).flatten()\n\ndef write_tuple_to_shared_memory(index, values, shared_memory, space):\n for value, memory, subspace in zip(values, shared_memory, space.spaces):\n write_to_shared_memory(index, value, memory, subspace)\n\ndef write_dict_to_shared_memory(index, values, shared_memory, space):\n for key, value in values.items():\n write_to_shared_memory(index, value, shared_memory[key], space.spaces[key])\n", "path": "gym/vector/utils/shared_memory.py"}], "after_files": [{"content": "import numpy as np\nfrom multiprocessing import Array\nfrom ctypes import c_bool\nfrom collections import OrderedDict\n\nfrom gym import logger\nfrom gym.spaces import Tuple, Dict\nfrom gym.vector.utils.spaces import _BaseGymSpaces\n\n__all__ = [\n 'create_shared_memory',\n 'read_from_shared_memory',\n 'write_to_shared_memory'\n]\n\ndef create_shared_memory(space, n=1):\n \"\"\"Create a shared memory object, to be shared across processes. This\n eventually contains the observations from the vectorized environment.\n\n Parameters\n ----------\n space : `gym.spaces.Space` instance\n Observation space of a single environment in the vectorized environment.\n\n n : int\n Number of environments in the vectorized environment (i.e. the number\n of processes).\n\n Returns\n -------\n shared_memory : dict, tuple, or `multiprocessing.Array` instance\n Shared object across processes.\n \"\"\"\n if isinstance(space, _BaseGymSpaces):\n return create_base_shared_memory(space, n=n)\n elif isinstance(space, Tuple):\n return create_tuple_shared_memory(space, n=n)\n elif isinstance(space, Dict):\n return create_dict_shared_memory(space, n=n)\n else:\n raise NotImplementedError()\n\ndef create_base_shared_memory(space, n=1):\n dtype = space.dtype.char\n if dtype in '?':\n dtype = c_bool\n return Array(dtype, n * int(np.prod(space.shape)))\n\ndef create_tuple_shared_memory(space, n=1):\n return tuple(create_shared_memory(subspace, n=n)\n for subspace in space.spaces)\n\ndef create_dict_shared_memory(space, n=1):\n return OrderedDict([(key, create_shared_memory(subspace, n=n))\n for (key, subspace) in space.spaces.items()])\n\n\ndef read_from_shared_memory(shared_memory, space, n=1):\n \"\"\"Read the batch of observations from shared memory as a numpy array.\n\n Parameters\n ----------\n shared_memory : dict, tuple, or `multiprocessing.Array` instance\n Shared object across processes. This contains the observations from the\n vectorized environment. This object is created with `create_shared_memory`.\n\n space : `gym.spaces.Space` instance\n Observation space of a single environment in the vectorized environment.\n\n n : int\n Number of environments in the vectorized environment (i.e. 
the number\n of processes).\n\n Returns\n -------\n observations : dict, tuple or `np.ndarray` instance\n Batch of observations as a (possibly nested) numpy array.\n\n Notes\n -----\n The numpy array objects returned by `read_from_shared_memory` shares the\n memory of `shared_memory`. Any changes to `shared_memory` are forwarded\n to `observations`, and vice-versa. To avoid any side-effect, use `np.copy`.\n \"\"\"\n if isinstance(space, _BaseGymSpaces):\n return read_base_from_shared_memory(shared_memory, space, n=n)\n elif isinstance(space, Tuple):\n return read_tuple_from_shared_memory(shared_memory, space, n=n)\n elif isinstance(space, Dict):\n return read_dict_from_shared_memory(shared_memory, space, n=n)\n else:\n raise NotImplementedError()\n\ndef read_base_from_shared_memory(shared_memory, space, n=1):\n return np.frombuffer(shared_memory.get_obj(),\n dtype=space.dtype).reshape((n,) + space.shape)\n\ndef read_tuple_from_shared_memory(shared_memory, space, n=1):\n return tuple(read_from_shared_memory(memory, subspace, n=n)\n for (memory, subspace) in zip(shared_memory, space.spaces))\n\ndef read_dict_from_shared_memory(shared_memory, space, n=1):\n return OrderedDict([(key, read_from_shared_memory(shared_memory[key],\n subspace, n=n)) for (key, subspace) in space.spaces.items()])\n\n\ndef write_to_shared_memory(index, value, shared_memory, space):\n \"\"\"Write the observation of a single environment into shared memory.\n\n Parameters\n ----------\n index : int\n Index of the environment (must be in `[0, num_envs)`).\n\n value : sample from `space`\n Observation of the single environment to write to shared memory.\n\n shared_memory : dict, tuple, or `multiprocessing.Array` instance\n Shared object across processes. This contains the observations from the\n vectorized environment. This object is created with `create_shared_memory`.\n\n space : `gym.spaces.Space` instance\n Observation space of a single environment in the vectorized environment.\n\n Returns\n -------\n `None`\n \"\"\"\n if isinstance(space, _BaseGymSpaces):\n write_base_to_shared_memory(index, value, shared_memory, space)\n elif isinstance(space, Tuple):\n write_tuple_to_shared_memory(index, value, shared_memory, space)\n elif isinstance(space, Dict):\n write_dict_to_shared_memory(index, value, shared_memory, space)\n else:\n raise NotImplementedError()\n\ndef write_base_to_shared_memory(index, value, shared_memory, space):\n size = int(np.prod(space.shape))\n destination = np.frombuffer(shared_memory.get_obj(), dtype=space.dtype)\n np.copyto(destination[index * size:(index + 1) * size], np.asarray(\n value, dtype=space.dtype).flatten())\n\ndef write_tuple_to_shared_memory(index, values, shared_memory, space):\n for value, memory, subspace in zip(values, shared_memory, space.spaces):\n write_to_shared_memory(index, value, memory, subspace)\n\ndef write_dict_to_shared_memory(index, values, shared_memory, space):\n for key, subspace in space.spaces.items():\n write_to_shared_memory(index, values[key], shared_memory[key], subspace)\n", "path": "gym/vector/utils/shared_memory.py"}]} | 2,200 | 428 |
gh_patches_debug_37156 | rasdani/github-patches | git_diff | streamlink__streamlink-3949 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plugins.svtplay: Cannot resolve playable stream
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest stable release
### Description
When trying to play for example https://www.svtplay.se/video/32279075/draknastet/draknastet-sasong-1-avsnitt-2?id=KA2BmZD
It no longer seems to parse the page correctly to resolve a playable stream. Unlike the tv4play plugin, this plugin parses the HTML page to work out where to go when you are on the program page rather than on the episode page.
This lookup no longer matches anything:
```
latest_episode_url_re = re.compile(r'''
class="play_titlepage__latest-video"\s+href="(?P<url>[^"]+)"
''', re.VERBOSE)
```
While debugging I found that https://api.svt.se/video/KA2BmZD returns the following JSON:
```
{
"svtId": "KA2BmZD",
"programVersionId": "1400537-002A",
"contentDuration": 3506,
"blockedForChildren": false,
"live": false,
"programTitle": "Draknästet",
"episodeTitle": "Avsnitt 2",
...
}
```
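(For reference, a minimal standalone way to inspect that response — the endpoint and field names here are the ones quoted above in this issue, not anything taken from the plugin code:)
```python
import requests

# Hypothetical sanity check of the API endpoint quoted above.
resp = requests.get("https://api.svt.se/video/KA2BmZD")
data = resp.json()
print(data.get("programTitle"), "-", data.get("episodeTitle"))
```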
With the following change, the video_id is resolved correctly from the HTML:
```
latest_episode_vod_id = re.compile(r'''
data-rt="top-area-play-button"\s+href=".*(?:id=)(?P<video_id>[^"]+)"
''', re.VERBOSE)
```
If you are directly on the play page of the site you get the vod_id in the URL as a parameter. So I have refactored to support both.
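For reference, a minimal sketch of pulling the id out of the URL's query string (this mirrors the approach in the patch further down; the helper name is only illustrative):
```python
from urllib.parse import parse_qsl, urlparse

def get_vod_id(url):
    # ".../draknastet-sasong-1-avsnitt-2?id=KA2BmZD" -> "KA2BmZD", or None if absent
    qs = dict(parse_qsl(urlparse(url).query))
    return qs.get("id")
```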
Now it finds the vod_id and initiates the DASH stream worker, but it still doesn't run.
I get an Exception in thread Thread-DASHStreamWorker, and this one seems a little trickier to figure out.
```
[cli][info] Opening stream: 1080p (dash)
[stream.dash][debug] Opening DASH reader for: 0 (video/mp4)
[stream.dash][debug] Opening DASH reader for: 5 (audio/mp4)
[stream.dash_manifest][debug] Generating segment timeline for static playlist (id=0))
[stream.dash_manifest][debug] Generating segment timeline for static playlist (id=5))
[cli][error] Try 1/1: Could not open stream <Stream()> (Could not open stream: cannot use FFMPEG)
[stream.ffmpegmux][debug] Closing ffmpeg thread
error: Could not open stream <Stream()>, tried 1 times, exiting
[stream.dash][debug] Download of segment: https://svt-vod-8r.akamaized.net/d0/se/20210723/841d135d-fb92-4acb-9dff-898c1db4af30/cmaf-video-avc-1920x1080p25-3089/cmaf-video-avc-1920x1080p25-3089-init.mp4 complete
Exception in thread Thread-DASHStreamWorker:
Traceback (most recent call last):
File "/usr/lib64/python3.9/threading.py", line 973, in _bootstrap_inner
self.run()
File "/usr/local/lib/python3.9/site-packages/streamlink-2.1.2+79.g2b9ca5d.dirty-py3.9.egg/streamlink/stream/segmented.py", line 87, in run
self.writer.put(segment)
File "/usr/local/lib/python3.9/site-packages/streamlink-2.1.2+79.g2b9ca5d.dirty-py3.9.egg/streamlink/stream/segmented.py", line 140, in put
future = self.executor.submit(self.fetch, segment, retries=self.retries)
File "/usr/lib64/python3.9/concurrent/futures/thread.py", line 163, in submit
raise RuntimeError('cannot schedule new futures after '
RuntimeError: cannot schedule new futures after interpreter shutdown
```
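(A side note on that last error: `concurrent.futures` refuses new work once shutdown has begun. The "interpreter shutdown" variant in the log appears to be the guard that fires when Python is already exiting while the worker thread is still scheduling segment downloads. The same class of failure can be reproduced in isolation:)
```python
from concurrent.futures import ThreadPoolExecutor

executor = ThreadPoolExecutor(max_workers=1)
executor.shutdown()
# Raises RuntimeError: cannot schedule new futures after shutdown
executor.submit(print, "too late")
```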
### Debug log
```text
[cli][debug] OS: Linux-5.13.12-200.fc34.x86_64-x86_64-with-glibc2.33
[cli][debug] Python: 3.9.6
[cli][debug] Streamlink: 2.3.0
[cli][debug] Requests(2.25.1), Socks(1.7.1), Websocket(0.57.0)
[cli][debug] Arguments:
[cli][debug] url=https://www.svtplay.se/video/32279075/draknastet/draknastet-sasong-1-avsnitt-2?id=KA2BmZD
[cli][debug] stream=['best']
[cli][debug] --loglevel=debug
[cli][debug] --player=mpv
[cli][debug] --verbose-player=True
[cli][debug] --player-passthrough=['hls']
[cli][debug] --hls-segment-threads=2
[cli][info] Found matching plugin svtplay for URL https://www.svtplay.se/video/32279075/draknastet/draknastet-sasong-1-avsnitt-2?id=KA2BmZD
[plugins.svtplay][debug] Path=/
error: No playable streams found on this URL: https://www.svtplay.se/video/32279075/draknastet/draknastet-sasong-1-avsnitt-2?id=KA2BmZD
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/svtplay.py`
Content:
```
1 import logging
2 import re
3 from urllib.parse import urljoin
4
5 from streamlink.plugin import Plugin, PluginArgument, PluginArguments, pluginmatcher
6 from streamlink.plugin.api import validate
7 from streamlink.stream import DASHStream, HTTPStream
8 from streamlink.stream.ffmpegmux import MuxedStream
9
10 log = logging.getLogger(__name__)
11
12
13 @pluginmatcher(re.compile(
14 r'https?://(?:www\.)?(?:svtplay|oppetarkiv)\.se(/(kanaler/)?)'
15 ))
16 class SVTPlay(Plugin):
17 api_url = 'https://api.svt.se/videoplayer-api/video/{0}'
18
19 author = None
20 category = None
21 title = None
22
23 latest_episode_url_re = re.compile(r'''
24 class="play_titlepage__latest-video"\s+href="(?P<url>[^"]+)"
25 ''', re.VERBOSE)
26
27 live_id_re = re.compile(r'.*/(?P<live_id>[^?]+)')
28
29 vod_id_re = re.compile(r'''
30 (?:DATA_LAKE\s+=\s+{"content":{"id":|"svtId":|data-video-id=)
31 "(?P<vod_id>[^"]+)"
32 ''', re.VERBOSE)
33
34 _video_schema = validate.Schema({
35 validate.optional('programTitle'): validate.text,
36 validate.optional('episodeTitle'): validate.text,
37 'videoReferences': [{
38 'url': validate.url(),
39 'format': validate.text,
40 }],
41 validate.optional('subtitleReferences'): [{
42 'url': validate.url(),
43 'format': validate.text,
44 }],
45 })
46
47 arguments = PluginArguments(
48 PluginArgument("mux-subtitles", is_global=True)
49 )
50
51 def get_author(self):
52 if self.author is not None:
53 return self.author
54
55 def get_category(self):
56 if self.category is not None:
57 return self.category
58
59 def get_title(self):
60 if self.title is not None:
61 return self.title
62
63 def _set_metadata(self, data, category):
64 if 'programTitle' in data:
65 self.author = data['programTitle']
66
67 self.category = category
68
69 if 'episodeTitle' in data:
70 self.title = data['episodeTitle']
71
72 def _get_live(self, path):
73 match = self.live_id_re.search(path)
74 if match is None:
75 return
76
77 live_id = "ch-{0}".format(match.group('live_id'))
78 log.debug("Live ID={0}".format(live_id))
79
80 res = self.session.http.get(self.api_url.format(live_id))
81 api_data = self.session.http.json(res, schema=self._video_schema)
82
83 self._set_metadata(api_data, 'Live')
84
85 for playlist in api_data['videoReferences']:
86 if playlist['format'] == 'dashhbbtv':
87 yield from DASHStream.parse_manifest(self.session, playlist['url']).items()
88
89 def _get_vod(self):
90 res = self.session.http.get(self.url)
91 match = self.latest_episode_url_re.search(res.text)
92 if match:
93 res = self.session.http.get(
94 urljoin(self.url, match.group('url')),
95 )
96
97 match = self.vod_id_re.search(res.text)
98 if match is None:
99 return
100
101 vod_id = match.group('vod_id')
102 log.debug("VOD ID={0}".format(vod_id))
103
104 res = self.session.http.get(self.api_url.format(vod_id))
105 api_data = self.session.http.json(res, schema=self._video_schema)
106
107 self._set_metadata(api_data, 'VOD')
108
109 substreams = {}
110 if 'subtitleReferences' in api_data:
111 for subtitle in api_data['subtitleReferences']:
112 if subtitle['format'] == 'webvtt':
113 log.debug("Subtitle={0}".format(subtitle['url']))
114 substreams[subtitle['format']] = HTTPStream(
115 self.session,
116 subtitle['url'],
117 )
118
119 for manifest in api_data['videoReferences']:
120 if manifest['format'] == 'dashhbbtv':
121 for q, s in DASHStream.parse_manifest(self.session, manifest['url']).items():
122 if self.get_option('mux_subtitles') and substreams:
123 yield q, MuxedStream(self.session, s, subtitles=substreams)
124 else:
125 yield q, s
126
127 def _get_streams(self):
128 path, live = self.match.groups()
129 log.debug("Path={0}".format(path))
130
131 if live:
132 return self._get_live(path)
133 else:
134 return self._get_vod()
135
136
137 __plugin__ = SVTPlay
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/svtplay.py b/src/streamlink/plugins/svtplay.py
--- a/src/streamlink/plugins/svtplay.py
+++ b/src/streamlink/plugins/svtplay.py
@@ -1,6 +1,6 @@
import logging
import re
-from urllib.parse import urljoin
+from urllib.parse import parse_qsl, urlparse
from streamlink.plugin import Plugin, PluginArgument, PluginArguments, pluginmatcher
from streamlink.plugin.api import validate
@@ -21,16 +21,11 @@
title = None
latest_episode_url_re = re.compile(r'''
- class="play_titlepage__latest-video"\s+href="(?P<url>[^"]+)"
+ data-rt="top-area-play-button"\s+href="(?P<url>[^"]+)"
''', re.VERBOSE)
live_id_re = re.compile(r'.*/(?P<live_id>[^?]+)')
- vod_id_re = re.compile(r'''
- (?:DATA_LAKE\s+=\s+{"content":{"id":|"svtId":|data-video-id=)
- "(?P<vod_id>[^"]+)"
- ''', re.VERBOSE)
-
_video_schema = validate.Schema({
validate.optional('programTitle'): validate.text,
validate.optional('episodeTitle'): validate.text,
@@ -87,18 +82,18 @@
yield from DASHStream.parse_manifest(self.session, playlist['url']).items()
def _get_vod(self):
- res = self.session.http.get(self.url)
- match = self.latest_episode_url_re.search(res.text)
- if match:
- res = self.session.http.get(
- urljoin(self.url, match.group('url')),
- )
-
- match = self.vod_id_re.search(res.text)
- if match is None:
+ vod_id = self._get_vod_id(self.url)
+
+ if vod_id is None:
+ res = self.session.http.get(self.url)
+ match = self.latest_episode_url_re.search(res.text)
+ if match is None:
+ return
+ vod_id = self._get_vod_id(match.group("url"))
+
+ if vod_id is None:
return
- vod_id = match.group('vod_id')
log.debug("VOD ID={0}".format(vod_id))
res = self.session.http.get(self.api_url.format(vod_id))
@@ -124,6 +119,10 @@
else:
yield q, s
+ def _get_vod_id(self, url):
+ qs = dict(parse_qsl(urlparse(url).query))
+ return qs.get("id")
+
def _get_streams(self):
path, live = self.match.groups()
log.debug("Path={0}".format(path))
| {"golden_diff": "diff --git a/src/streamlink/plugins/svtplay.py b/src/streamlink/plugins/svtplay.py\n--- a/src/streamlink/plugins/svtplay.py\n+++ b/src/streamlink/plugins/svtplay.py\n@@ -1,6 +1,6 @@\n import logging\n import re\n-from urllib.parse import urljoin\n+from urllib.parse import parse_qsl, urlparse\n \n from streamlink.plugin import Plugin, PluginArgument, PluginArguments, pluginmatcher\n from streamlink.plugin.api import validate\n@@ -21,16 +21,11 @@\n title = None\n \n latest_episode_url_re = re.compile(r'''\n- class=\"play_titlepage__latest-video\"\\s+href=\"(?P<url>[^\"]+)\"\n+ data-rt=\"top-area-play-button\"\\s+href=\"(?P<url>[^\"]+)\"\n ''', re.VERBOSE)\n \n live_id_re = re.compile(r'.*/(?P<live_id>[^?]+)')\n \n- vod_id_re = re.compile(r'''\n- (?:DATA_LAKE\\s+=\\s+{\"content\":{\"id\":|\"svtId\":|data-video-id=)\n- \"(?P<vod_id>[^\"]+)\"\n- ''', re.VERBOSE)\n-\n _video_schema = validate.Schema({\n validate.optional('programTitle'): validate.text,\n validate.optional('episodeTitle'): validate.text,\n@@ -87,18 +82,18 @@\n yield from DASHStream.parse_manifest(self.session, playlist['url']).items()\n \n def _get_vod(self):\n- res = self.session.http.get(self.url)\n- match = self.latest_episode_url_re.search(res.text)\n- if match:\n- res = self.session.http.get(\n- urljoin(self.url, match.group('url')),\n- )\n-\n- match = self.vod_id_re.search(res.text)\n- if match is None:\n+ vod_id = self._get_vod_id(self.url)\n+\n+ if vod_id is None:\n+ res = self.session.http.get(self.url)\n+ match = self.latest_episode_url_re.search(res.text)\n+ if match is None:\n+ return\n+ vod_id = self._get_vod_id(match.group(\"url\"))\n+\n+ if vod_id is None:\n return\n \n- vod_id = match.group('vod_id')\n log.debug(\"VOD ID={0}\".format(vod_id))\n \n res = self.session.http.get(self.api_url.format(vod_id))\n@@ -124,6 +119,10 @@\n else:\n yield q, s\n \n+ def _get_vod_id(self, url):\n+ qs = dict(parse_qsl(urlparse(url).query))\n+ return qs.get(\"id\")\n+\n def _get_streams(self):\n path, live = self.match.groups()\n log.debug(\"Path={0}\".format(path))\n", "issue": "plugins.svtplay: Cannot resolve playable stream\n### Checklist\n\n- [X] This is a plugin issue and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest stable release\n\n### Description\n\nWhen trying to play for example https://www.svtplay.se/video/32279075/draknastet/draknastet-sasong-1-avsnitt-2?id=KA2BmZD\r\n\r\nIt does not seem to parse the information correctly anymore to resolve a playable stream. 
This plugin compared to the one for tv4play is parsing the HTML page in order to resolve where to go if you are on the program page instead of the episode page.\r\n\r\nThis lookup resolves to nothing anymore:\r\n\r\n```\r\nlatest_episode_url_re = re.compile(r'''\r\n class=\"play_titlepage__latest-video\"\\s+href=\"(?P<url>[^\"]+)\"\r\n ''', re.VERBOSE)\r\n```\r\n\r\nWhen debugging I find that this https://api.svt.se/video/KA2BmZD resolves the JSON.\r\n\r\n```\r\n{\r\n \"svtId\": \"KA2BmZD\",\r\n \"programVersionId\": \"1400537-002A\",\r\n \"contentDuration\": 3506,\r\n \"blockedForChildren\": false,\r\n \"live\": false,\r\n \"programTitle\": \"Drakn\u00e4stet\",\r\n \"episodeTitle\": \"Avsnitt 2\",\r\n ...\r\n }\r\n```\r\n\r\nWith the following changes it resolves the video_id correctly from the HTML:\r\n```\r\nlatest_episode_vod_id = re.compile(r'''\r\n data-rt=\"top-area-play-button\"\\s+href=\".*(?:id=)(?P<video_id>[^\"]+)\"\r\n ''', re.VERBOSE)\r\n```\r\n\r\nIf you are directly on the play page of the site you get the vod_id in the URL as a parameter. So I have refactored to support both.\r\n\r\nNow it finds the vod_id and initiates the dash stream worker but it still doesn't run.\r\n\r\nI get Exception in thread Thread-DASHStreamWorker and this one seems a little more tricky to figure out.\r\n\r\n```\r\n[cli][info] Opening stream: 1080p (dash)\r\n[stream.dash][debug] Opening DASH reader for: 0 (video/mp4)\r\n[stream.dash][debug] Opening DASH reader for: 5 (audio/mp4)\r\n[stream.dash_manifest][debug] Generating segment timeline for static playlist (id=0))\r\n[stream.dash_manifest][debug] Generating segment timeline for static playlist (id=5))\r\n[cli][error] Try 1/1: Could not open stream <Stream()> (Could not open stream: cannot use FFMPEG)\r\n[stream.ffmpegmux][debug] Closing ffmpeg thread\r\nerror: Could not open stream <Stream()>, tried 1 times, exiting\r\n[stream.dash][debug] Download of segment: https://svt-vod-8r.akamaized.net/d0/se/20210723/841d135d-fb92-4acb-9dff-898c1db4af30/cmaf-video-avc-1920x1080p25-3089/cmaf-video-avc-1920x1080p25-3089-init.mp4 complete\r\nException in thread Thread-DASHStreamWorker:\r\nTraceback (most recent call last):\r\n File \"/usr/lib64/python3.9/threading.py\", line 973, in _bootstrap_inner\r\n self.run()\r\n File \"/usr/local/lib/python3.9/site-packages/streamlink-2.1.2+79.g2b9ca5d.dirty-py3.9.egg/streamlink/stream/segmented.py\", line 87, in run\r\n self.writer.put(segment)\r\n File \"/usr/local/lib/python3.9/site-packages/streamlink-2.1.2+79.g2b9ca5d.dirty-py3.9.egg/streamlink/stream/segmented.py\", line 140, in put\r\n future = self.executor.submit(self.fetch, segment, retries=self.retries)\r\n File \"/usr/lib64/python3.9/concurrent/futures/thread.py\", line 163, in submit\r\n raise RuntimeError('cannot schedule new futures after '\r\nRuntimeError: cannot schedule new futures after interpreter shutdown\r\n```\r\n\n\n### Debug log\n\n```text\n[cli][debug] OS: Linux-5.13.12-200.fc34.x86_64-x86_64-with-glibc2.33\r\n[cli][debug] Python: 3.9.6\r\n[cli][debug] Streamlink: 2.3.0\r\n[cli][debug] Requests(2.25.1), Socks(1.7.1), Websocket(0.57.0)\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://www.svtplay.se/video/32279075/draknastet/draknastet-sasong-1-avsnitt-2?id=KA2BmZD\r\n[cli][debug] stream=['best']\r\n[cli][debug] --loglevel=debug\r\n[cli][debug] --player=mpv\r\n[cli][debug] --verbose-player=True\r\n[cli][debug] --player-passthrough=['hls']\r\n[cli][debug] --hls-segment-threads=2\r\n[cli][info] Found matching plugin svtplay for URL 
https://www.svtplay.se/video/32279075/draknastet/draknastet-sasong-1-avsnitt-2?id=KA2BmZD\r\n[plugins.svtplay][debug] Path=/\r\nerror: No playable streams found on this URL: https://www.svtplay.se/video/32279075/draknastet/draknastet-sasong-1-avsnitt-2?id=KA2BmZD\n```\n\n", "before_files": [{"content": "import logging\nimport re\nfrom urllib.parse import urljoin\n\nfrom streamlink.plugin import Plugin, PluginArgument, PluginArguments, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import DASHStream, HTTPStream\nfrom streamlink.stream.ffmpegmux import MuxedStream\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r'https?://(?:www\\.)?(?:svtplay|oppetarkiv)\\.se(/(kanaler/)?)'\n))\nclass SVTPlay(Plugin):\n api_url = 'https://api.svt.se/videoplayer-api/video/{0}'\n\n author = None\n category = None\n title = None\n\n latest_episode_url_re = re.compile(r'''\n class=\"play_titlepage__latest-video\"\\s+href=\"(?P<url>[^\"]+)\"\n ''', re.VERBOSE)\n\n live_id_re = re.compile(r'.*/(?P<live_id>[^?]+)')\n\n vod_id_re = re.compile(r'''\n (?:DATA_LAKE\\s+=\\s+{\"content\":{\"id\":|\"svtId\":|data-video-id=)\n \"(?P<vod_id>[^\"]+)\"\n ''', re.VERBOSE)\n\n _video_schema = validate.Schema({\n validate.optional('programTitle'): validate.text,\n validate.optional('episodeTitle'): validate.text,\n 'videoReferences': [{\n 'url': validate.url(),\n 'format': validate.text,\n }],\n validate.optional('subtitleReferences'): [{\n 'url': validate.url(),\n 'format': validate.text,\n }],\n })\n\n arguments = PluginArguments(\n PluginArgument(\"mux-subtitles\", is_global=True)\n )\n\n def get_author(self):\n if self.author is not None:\n return self.author\n\n def get_category(self):\n if self.category is not None:\n return self.category\n\n def get_title(self):\n if self.title is not None:\n return self.title\n\n def _set_metadata(self, data, category):\n if 'programTitle' in data:\n self.author = data['programTitle']\n\n self.category = category\n\n if 'episodeTitle' in data:\n self.title = data['episodeTitle']\n\n def _get_live(self, path):\n match = self.live_id_re.search(path)\n if match is None:\n return\n\n live_id = \"ch-{0}\".format(match.group('live_id'))\n log.debug(\"Live ID={0}\".format(live_id))\n\n res = self.session.http.get(self.api_url.format(live_id))\n api_data = self.session.http.json(res, schema=self._video_schema)\n\n self._set_metadata(api_data, 'Live')\n\n for playlist in api_data['videoReferences']:\n if playlist['format'] == 'dashhbbtv':\n yield from DASHStream.parse_manifest(self.session, playlist['url']).items()\n\n def _get_vod(self):\n res = self.session.http.get(self.url)\n match = self.latest_episode_url_re.search(res.text)\n if match:\n res = self.session.http.get(\n urljoin(self.url, match.group('url')),\n )\n\n match = self.vod_id_re.search(res.text)\n if match is None:\n return\n\n vod_id = match.group('vod_id')\n log.debug(\"VOD ID={0}\".format(vod_id))\n\n res = self.session.http.get(self.api_url.format(vod_id))\n api_data = self.session.http.json(res, schema=self._video_schema)\n\n self._set_metadata(api_data, 'VOD')\n\n substreams = {}\n if 'subtitleReferences' in api_data:\n for subtitle in api_data['subtitleReferences']:\n if subtitle['format'] == 'webvtt':\n log.debug(\"Subtitle={0}\".format(subtitle['url']))\n substreams[subtitle['format']] = HTTPStream(\n self.session,\n subtitle['url'],\n )\n\n for manifest in api_data['videoReferences']:\n if manifest['format'] == 'dashhbbtv':\n for q, s in 
DASHStream.parse_manifest(self.session, manifest['url']).items():\n if self.get_option('mux_subtitles') and substreams:\n yield q, MuxedStream(self.session, s, subtitles=substreams)\n else:\n yield q, s\n\n def _get_streams(self):\n path, live = self.match.groups()\n log.debug(\"Path={0}\".format(path))\n\n if live:\n return self._get_live(path)\n else:\n return self._get_vod()\n\n\n__plugin__ = SVTPlay\n", "path": "src/streamlink/plugins/svtplay.py"}], "after_files": [{"content": "import logging\nimport re\nfrom urllib.parse import parse_qsl, urlparse\n\nfrom streamlink.plugin import Plugin, PluginArgument, PluginArguments, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import DASHStream, HTTPStream\nfrom streamlink.stream.ffmpegmux import MuxedStream\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r'https?://(?:www\\.)?(?:svtplay|oppetarkiv)\\.se(/(kanaler/)?)'\n))\nclass SVTPlay(Plugin):\n api_url = 'https://api.svt.se/videoplayer-api/video/{0}'\n\n author = None\n category = None\n title = None\n\n latest_episode_url_re = re.compile(r'''\n data-rt=\"top-area-play-button\"\\s+href=\"(?P<url>[^\"]+)\"\n ''', re.VERBOSE)\n\n live_id_re = re.compile(r'.*/(?P<live_id>[^?]+)')\n\n _video_schema = validate.Schema({\n validate.optional('programTitle'): validate.text,\n validate.optional('episodeTitle'): validate.text,\n 'videoReferences': [{\n 'url': validate.url(),\n 'format': validate.text,\n }],\n validate.optional('subtitleReferences'): [{\n 'url': validate.url(),\n 'format': validate.text,\n }],\n })\n\n arguments = PluginArguments(\n PluginArgument(\"mux-subtitles\", is_global=True)\n )\n\n def get_author(self):\n if self.author is not None:\n return self.author\n\n def get_category(self):\n if self.category is not None:\n return self.category\n\n def get_title(self):\n if self.title is not None:\n return self.title\n\n def _set_metadata(self, data, category):\n if 'programTitle' in data:\n self.author = data['programTitle']\n\n self.category = category\n\n if 'episodeTitle' in data:\n self.title = data['episodeTitle']\n\n def _get_live(self, path):\n match = self.live_id_re.search(path)\n if match is None:\n return\n\n live_id = \"ch-{0}\".format(match.group('live_id'))\n log.debug(\"Live ID={0}\".format(live_id))\n\n res = self.session.http.get(self.api_url.format(live_id))\n api_data = self.session.http.json(res, schema=self._video_schema)\n\n self._set_metadata(api_data, 'Live')\n\n for playlist in api_data['videoReferences']:\n if playlist['format'] == 'dashhbbtv':\n yield from DASHStream.parse_manifest(self.session, playlist['url']).items()\n\n def _get_vod(self):\n vod_id = self._get_vod_id(self.url)\n\n if vod_id is None:\n res = self.session.http.get(self.url)\n match = self.latest_episode_url_re.search(res.text)\n if match is None:\n return\n vod_id = self._get_vod_id(match.group(\"url\"))\n\n if vod_id is None:\n return\n\n log.debug(\"VOD ID={0}\".format(vod_id))\n\n res = self.session.http.get(self.api_url.format(vod_id))\n api_data = self.session.http.json(res, schema=self._video_schema)\n\n self._set_metadata(api_data, 'VOD')\n\n substreams = {}\n if 'subtitleReferences' in api_data:\n for subtitle in api_data['subtitleReferences']:\n if subtitle['format'] == 'webvtt':\n log.debug(\"Subtitle={0}\".format(subtitle['url']))\n substreams[subtitle['format']] = HTTPStream(\n self.session,\n subtitle['url'],\n )\n\n for manifest in api_data['videoReferences']:\n if manifest['format'] == 'dashhbbtv':\n for q, s in 
DASHStream.parse_manifest(self.session, manifest['url']).items():\n if self.get_option('mux_subtitles') and substreams:\n yield q, MuxedStream(self.session, s, subtitles=substreams)\n else:\n yield q, s\n\n def _get_vod_id(self, url):\n qs = dict(parse_qsl(urlparse(url).query))\n return qs.get(\"id\")\n\n def _get_streams(self):\n path, live = self.match.groups()\n log.debug(\"Path={0}\".format(path))\n\n if live:\n return self._get_live(path)\n else:\n return self._get_vod()\n\n\n__plugin__ = SVTPlay\n", "path": "src/streamlink/plugins/svtplay.py"}]} | 3,019 | 630 |
gh_patches_debug_3324 | rasdani/github-patches | git_diff | pandas-dev__pandas-4991 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: Unexpected behaviour when reading large text files with mixed datatypes
read_csv gives unexpected behaviour with large files if a column contains both strings and integers. eg
``` python
>>> df=DataFrame({'colA':range(500000-1)+['apple', 'pear']+range(500000-1)})
>>> len(set(df.colA))
500001
>>> df.to_csv('testpandas2.txt')
>>> df2=read_csv('testpandas2.txt')
>>> len(set(df2.colA))
762143
>>> pandas.__version__
'0.11.0'
```
It seems some of the integers are parsed as integers and others as strings.
``` python
>>> list(set(df2.colA))[-10:]
['282248', '282249', '282240', '282241', '282242', '15679', '282244', '282245', '282246', '282247']
>>> list(set(df2.colA))[:10]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
```
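(A note on user-side workarounds, separate from the library-side fix below: the C parser reads large files in chunks and infers each column's dtype per chunk, which is how part of `colA` ends up as integers and part as strings. Declaring the dtype up front, or disabling the chunked inference, sidesteps the symptom — a minimal sketch using standard `read_csv` options:)

``` python
import pandas as pd

# Force a single dtype for the mixed column...
df2 = pd.read_csv('testpandas2.txt', dtype={'colA': str})

# ...or let the parser see the whole column before inferring its type.
df2 = pd.read_csv('testpandas2.txt', low_memory=False)
```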
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pandas/io/common.py`
Content:
```
1 """Common IO api utilities"""
2
3 import sys
4 import zipfile
5 from contextlib import contextmanager, closing
6
7 from pandas.compat import StringIO
8 from pandas import compat
9
10
11 if compat.PY3:
12 from urllib.request import urlopen
13 _urlopen = urlopen
14 from urllib.parse import urlparse as parse_url
15 import urllib.parse as compat_parse
16 from urllib.parse import uses_relative, uses_netloc, uses_params, urlencode
17 from urllib.error import URLError
18 from http.client import HTTPException
19 else:
20 from urllib2 import urlopen as _urlopen
21 from urllib import urlencode
22 from urlparse import urlparse as parse_url
23 from urlparse import uses_relative, uses_netloc, uses_params
24 from urllib2 import URLError
25 from httplib import HTTPException
26 from contextlib import contextmanager, closing
27 from functools import wraps
28
29 # @wraps(_urlopen)
30 @contextmanager
31 def urlopen(*args, **kwargs):
32 with closing(_urlopen(*args, **kwargs)) as f:
33 yield f
34
35
36 _VALID_URLS = set(uses_relative + uses_netloc + uses_params)
37 _VALID_URLS.discard('')
38
39 class PerformanceWarning(Warning):
40 pass
41
42
43 def _is_url(url):
44 """Check to see if a URL has a valid protocol.
45
46 Parameters
47 ----------
48 url : str or unicode
49
50 Returns
51 -------
52 isurl : bool
53 If `url` has a valid protocol return True otherwise False.
54 """
55 try:
56 return parse_url(url).scheme in _VALID_URLS
57 except:
58 return False
59
60
61 def _is_s3_url(url):
62 """Check for an s3 url"""
63 try:
64 return parse_url(url).scheme == 's3'
65 except:
66 return False
67
68
69 def maybe_read_encoded_stream(reader, encoding=None):
70 """ read an encoded stream from the reader and transform the bytes to unicode
71 if required based on the encoding
72
73 Parameters
74 ----------
75 reader : a streamable file-like object
76 encoding : optional, the encoding to attempt to read
77
78 Returns
79 -------
80 a tuple of (a stream of decoded bytes, the encoding which was used)
81
82 """
83
84 if compat.PY3 or encoding is not None: # pragma: no cover
85 if encoding:
86 errors = 'strict'
87 else:
88 errors = 'replace'
89 encoding = 'utf-8'
90 reader = StringIO(reader.read().decode(encoding, errors))
91 else:
92 encoding = None
93 return reader, encoding
94
95 def get_filepath_or_buffer(filepath_or_buffer, encoding=None):
96 """
97 If the filepath_or_buffer is a url, translate and return the buffer
98 passthru otherwise.
99
100 Parameters
101 ----------
102 filepath_or_buffer : a url, filepath, or buffer
103 encoding : the encoding to use to decode py3 bytes, default is 'utf-8'
104
105 Returns
106 -------
107 a filepath_or_buffer, the encoding
108 """
109
110 if _is_url(filepath_or_buffer):
111 req = _urlopen(str(filepath_or_buffer))
112 return maybe_read_encoded_stream(req,encoding)
113
114 if _is_s3_url(filepath_or_buffer):
115 try:
116 import boto
117 except:
118 raise ImportError("boto is required to handle s3 files")
119 # Assuming AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
120 # are environment variables
121 parsed_url = parse_url(filepath_or_buffer)
122 conn = boto.connect_s3()
123 b = conn.get_bucket(parsed_url.netloc)
124 k = boto.s3.key.Key(b)
125 k.key = parsed_url.path
126 filepath_or_buffer = StringIO(k.get_contents_as_string())
127 return filepath_or_buffer, None
128
129 return filepath_or_buffer, None
130
131
132 # ZipFile is not a context manager for <= 2.6
133 # must be tuple index here since 2.6 doesn't use namedtuple for version_info
134 if sys.version_info[1] <= 6:
135 @contextmanager
136 def ZipFile(*args, **kwargs):
137 with closing(zipfile.ZipFile(*args, **kwargs)) as zf:
138 yield zf
139 else:
140 ZipFile = zipfile.ZipFile
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pandas/io/common.py b/pandas/io/common.py
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -36,10 +36,15 @@
 _VALID_URLS = set(uses_relative + uses_netloc + uses_params)
 _VALID_URLS.discard('')
 
+
 class PerformanceWarning(Warning):
     pass
 
 
+class DtypeWarning(Warning):
+    pass
+
+
 def _is_url(url):
     """Check to see if a URL has a valid protocol.
 
| {"golden_diff": "diff --git a/pandas/io/common.py b/pandas/io/common.py\n--- a/pandas/io/common.py\n+++ b/pandas/io/common.py\n@@ -36,10 +36,15 @@\n _VALID_URLS = set(uses_relative + uses_netloc + uses_params)\n _VALID_URLS.discard('')\n \n+\n class PerformanceWarning(Warning):\n pass\n \n \n+class DtypeWarning(Warning):\n+ pass\n+\n+\n def _is_url(url):\n \"\"\"Check to see if a URL has a valid protocol.\n", "issue": "BUG: Unexpected behaviour when reading large text files with mixed datatypes\nread_csv gives unexpected behaviour with large files if a column contains both strings and integers. eg\n\n``` python\n\n>>> df=DataFrame({'colA':range(500000-1)+['apple', 'pear']+range(500000-1)})\nlen(set(df.colA))\n500001\n\n>>> df.to_csv('testpandas2.txt')\n>>> df2=read_csv('testpandas2.txt')\n>>> len(set(df2.colA))\n762143\n\n >>> pandas.__version__\n'0.11.0'\n```\n\nIt seems some of the integers are parsed as integers and others as strings.\n\n``` python\n>>> list(set(df2.colA))[-10:]\n['282248', '282249', '282240', '282241', '282242', '15679', '282244', '282245', '282246', '282247']\n>>> list(set(df2.colA))[:10]\n[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n```\n\nBUG: Unexpected behaviour when reading large text files with mixed datatypes\nread_csv gives unexpected behaviour with large files if a column contains both strings and integers. eg\n\n``` python\n\n>>> df=DataFrame({'colA':range(500000-1)+['apple', 'pear']+range(500000-1)})\nlen(set(df.colA))\n500001\n\n>>> df.to_csv('testpandas2.txt')\n>>> df2=read_csv('testpandas2.txt')\n>>> len(set(df2.colA))\n762143\n\n >>> pandas.__version__\n'0.11.0'\n```\n\nIt seems some of the integers are parsed as integers and others as strings.\n\n``` python\n>>> list(set(df2.colA))[-10:]\n['282248', '282249', '282240', '282241', '282242', '15679', '282244', '282245', '282246', '282247']\n>>> list(set(df2.colA))[:10]\n[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n```\n\n", "before_files": [{"content": "\"\"\"Common IO api utilities\"\"\"\n\nimport sys\nimport zipfile\nfrom contextlib import contextmanager, closing\n\nfrom pandas.compat import StringIO\nfrom pandas import compat\n\n\nif compat.PY3:\n from urllib.request import urlopen\n _urlopen = urlopen\n from urllib.parse import urlparse as parse_url\n import urllib.parse as compat_parse\n from urllib.parse import uses_relative, uses_netloc, uses_params, urlencode\n from urllib.error import URLError\n from http.client import HTTPException\nelse:\n from urllib2 import urlopen as _urlopen\n from urllib import urlencode\n from urlparse import urlparse as parse_url\n from urlparse import uses_relative, uses_netloc, uses_params\n from urllib2 import URLError\n from httplib import HTTPException\n from contextlib import contextmanager, closing\n from functools import wraps\n\n # @wraps(_urlopen)\n @contextmanager\n def urlopen(*args, **kwargs):\n with closing(_urlopen(*args, **kwargs)) as f:\n yield f\n\n\n_VALID_URLS = set(uses_relative + uses_netloc + uses_params)\n_VALID_URLS.discard('')\n\nclass PerformanceWarning(Warning):\n pass\n\n\ndef _is_url(url):\n \"\"\"Check to see if a URL has a valid protocol.\n\n Parameters\n ----------\n url : str or unicode\n\n Returns\n -------\n isurl : bool\n If `url` has a valid protocol return True otherwise False.\n \"\"\"\n try:\n return parse_url(url).scheme in _VALID_URLS\n except:\n return False\n\n\ndef _is_s3_url(url):\n \"\"\"Check for an s3 url\"\"\"\n try:\n return parse_url(url).scheme == 's3'\n except:\n return False\n\n\ndef maybe_read_encoded_stream(reader, encoding=None):\n 
\"\"\" read an encoded stream from the reader and transform the bytes to unicode\n if required based on the encoding\n\n Parameters\n ----------\n reader : a streamable file-like object\n encoding : optional, the encoding to attempt to read\n\n Returns\n -------\n a tuple of (a stream of decoded bytes, the encoding which was used)\n\n \"\"\"\n\n if compat.PY3 or encoding is not None: # pragma: no cover\n if encoding:\n errors = 'strict'\n else:\n errors = 'replace'\n encoding = 'utf-8'\n reader = StringIO(reader.read().decode(encoding, errors))\n else:\n encoding = None\n return reader, encoding\n\ndef get_filepath_or_buffer(filepath_or_buffer, encoding=None):\n \"\"\"\n If the filepath_or_buffer is a url, translate and return the buffer\n passthru otherwise.\n\n Parameters\n ----------\n filepath_or_buffer : a url, filepath, or buffer\n encoding : the encoding to use to decode py3 bytes, default is 'utf-8'\n\n Returns\n -------\n a filepath_or_buffer, the encoding\n \"\"\"\n\n if _is_url(filepath_or_buffer):\n req = _urlopen(str(filepath_or_buffer))\n return maybe_read_encoded_stream(req,encoding)\n\n if _is_s3_url(filepath_or_buffer):\n try:\n import boto\n except:\n raise ImportError(\"boto is required to handle s3 files\")\n # Assuming AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY\n # are environment variables\n parsed_url = parse_url(filepath_or_buffer)\n conn = boto.connect_s3()\n b = conn.get_bucket(parsed_url.netloc)\n k = boto.s3.key.Key(b)\n k.key = parsed_url.path\n filepath_or_buffer = StringIO(k.get_contents_as_string())\n return filepath_or_buffer, None\n\n return filepath_or_buffer, None\n\n\n# ZipFile is not a context manager for <= 2.6\n# must be tuple index here since 2.6 doesn't use namedtuple for version_info\nif sys.version_info[1] <= 6:\n @contextmanager\n def ZipFile(*args, **kwargs):\n with closing(zipfile.ZipFile(*args, **kwargs)) as zf:\n yield zf\nelse:\n ZipFile = zipfile.ZipFile\n", "path": "pandas/io/common.py"}], "after_files": [{"content": "\"\"\"Common IO api utilities\"\"\"\n\nimport sys\nimport zipfile\nfrom contextlib import contextmanager, closing\n\nfrom pandas.compat import StringIO\nfrom pandas import compat\n\n\nif compat.PY3:\n from urllib.request import urlopen\n _urlopen = urlopen\n from urllib.parse import urlparse as parse_url\n import urllib.parse as compat_parse\n from urllib.parse import uses_relative, uses_netloc, uses_params, urlencode\n from urllib.error import URLError\n from http.client import HTTPException\nelse:\n from urllib2 import urlopen as _urlopen\n from urllib import urlencode\n from urlparse import urlparse as parse_url\n from urlparse import uses_relative, uses_netloc, uses_params\n from urllib2 import URLError\n from httplib import HTTPException\n from contextlib import contextmanager, closing\n from functools import wraps\n\n # @wraps(_urlopen)\n @contextmanager\n def urlopen(*args, **kwargs):\n with closing(_urlopen(*args, **kwargs)) as f:\n yield f\n\n\n_VALID_URLS = set(uses_relative + uses_netloc + uses_params)\n_VALID_URLS.discard('')\n\n\nclass PerformanceWarning(Warning):\n pass\n\n\nclass DtypeWarning(Warning):\n pass\n\n\ndef _is_url(url):\n \"\"\"Check to see if a URL has a valid protocol.\n\n Parameters\n ----------\n url : str or unicode\n\n Returns\n -------\n isurl : bool\n If `url` has a valid protocol return True otherwise False.\n \"\"\"\n try:\n return parse_url(url).scheme in _VALID_URLS\n except:\n return False\n\n\ndef _is_s3_url(url):\n \"\"\"Check for an s3 url\"\"\"\n try:\n return 
parse_url(url).scheme == 's3'\n except:\n return False\n\n\ndef maybe_read_encoded_stream(reader, encoding=None):\n \"\"\" read an encoded stream from the reader and transform the bytes to unicode\n if required based on the encoding\n\n Parameters\n ----------\n reader : a streamable file-like object\n encoding : optional, the encoding to attempt to read\n\n Returns\n -------\n a tuple of (a stream of decoded bytes, the encoding which was used)\n\n \"\"\"\n\n if compat.PY3 or encoding is not None: # pragma: no cover\n if encoding:\n errors = 'strict'\n else:\n errors = 'replace'\n encoding = 'utf-8'\n reader = StringIO(reader.read().decode(encoding, errors))\n else:\n encoding = None\n return reader, encoding\n\ndef get_filepath_or_buffer(filepath_or_buffer, encoding=None):\n \"\"\"\n If the filepath_or_buffer is a url, translate and return the buffer\n passthru otherwise.\n\n Parameters\n ----------\n filepath_or_buffer : a url, filepath, or buffer\n encoding : the encoding to use to decode py3 bytes, default is 'utf-8'\n\n Returns\n -------\n a filepath_or_buffer, the encoding\n \"\"\"\n\n if _is_url(filepath_or_buffer):\n req = _urlopen(str(filepath_or_buffer))\n return maybe_read_encoded_stream(req,encoding)\n\n if _is_s3_url(filepath_or_buffer):\n try:\n import boto\n except:\n raise ImportError(\"boto is required to handle s3 files\")\n # Assuming AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY\n # are environment variables\n parsed_url = parse_url(filepath_or_buffer)\n conn = boto.connect_s3()\n b = conn.get_bucket(parsed_url.netloc)\n k = boto.s3.key.Key(b)\n k.key = parsed_url.path\n filepath_or_buffer = StringIO(k.get_contents_as_string())\n return filepath_or_buffer, None\n\n return filepath_or_buffer, None\n\n\n# ZipFile is not a context manager for <= 2.6\n# must be tuple index here since 2.6 doesn't use namedtuple for version_info\nif sys.version_info[1] <= 6:\n @contextmanager\n def ZipFile(*args, **kwargs):\n with closing(zipfile.ZipFile(*args, **kwargs)) as zf:\n yield zf\nelse:\n ZipFile = zipfile.ZipFile\n", "path": "pandas/io/common.py"}]} | 2,056 | 114 |
gh_patches_debug_5432 | rasdani/github-patches | git_diff | lhotse-speech__lhotse-240 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cut concatenate doesn't consider the first sample in each batch
Found in #234
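A short note on what "the first sample" means here, inferred from the patch below: `concat_cuts` starts its scan at `current_idx = 1`, so the longest cut (index 0) is never considered as a concatenation target. That matters once `duration_factor > 1`, because `max_duration` then exceeds the longest cut's own duration and index 0 could absorb shorter cuts too. A rough pure-Python illustration of the greedy loop, using plain durations instead of real cuts:
```python
def concat_durations(durations, gap=1.0, max_duration=None, start_idx=0):
    # Simplified mirror of concat_cuts' loop structure, operating on floats.
    durations = sorted(durations, reverse=True)
    if max_duration is None:
        max_duration = durations[0]
    current_idx = start_idx
    while True:
        can_fit = False
        shortest = durations[-1]
        for _ in range(current_idx, len(durations) - 1):
            if durations[current_idx] + gap + shortest <= max_duration:
                durations[current_idx] += gap + shortest
                durations = durations[:-1]
                can_fit = True
                break
            current_idx += 1
        if not can_fit:
            break
    return durations

# duration_factor=2 would give max_duration=20 for a 10s longest cut:
print(concat_durations([10, 4, 3], max_duration=20, start_idx=1))  # [10, 8.0] - index 0 unused
print(concat_durations([10, 4, 3], max_duration=20, start_idx=0))  # [19.0]   - fully packed
```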
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lhotse/dataset/cut_transforms/concatenate.py`
Content:
```
1 from typing import Optional, Sequence
2
3 from lhotse import CutSet
4 from lhotse.cut import AnyCut
5 from lhotse.utils import Seconds
6
7
8 class CutConcatenate:
9 """
10 A transform on batch of cuts (``CutSet``) that concatenates the cuts to minimize the total amount of padding;
11 e.g. instead of creating a batch with 40 examples, we will merge some of the examples together
12 adding some silence between them to avoid a large number of padding frames that waste the computation.
13 """
14
15 def __init__(
16 self,
17 gap: Seconds = 1.0,
18 duration_factor: float = 1.0
19 ) -> None:
20 """
21 CutConcatenate's constructor.
22
23 :param gap: The duration of silence in seconds that is inserted between the cuts;
24 it's goal is to let the model "know" that there are separate utterances in a single example.
25 :param duration_factor: Determines the maximum duration of the concatenated cuts;
26 by default it's 1, setting the limit at the duration of the longest cut in the batch.
27 """
28 self.gap = gap
29 self.duration_factor = duration_factor
30
31 def __call__(self, cuts: CutSet) -> CutSet:
32 cuts = cuts.sort_by_duration(ascending=False)
33 return concat_cuts(
34 cuts,
35 gap=self.gap,
36 max_duration=cuts[0].duration * self.duration_factor
37 )
38
39
40 def concat_cuts(
41 cuts: Sequence[AnyCut],
42 gap: Seconds = 1.0,
43 max_duration: Optional[Seconds] = None
44 ) -> CutSet:
45 """
46 We're going to concatenate the cuts to minimize the amount of total padding frames used.
47 This means that some samples in the batch will be merged together into one sample,
48 separated by an interval of silence.
49 This is actually solving a knapsack problem.
50 In this initial implementation we're using a greedy approach:
51 going from the back (i.e. the shortest cuts) we'll try to concat them to the longest cut
52 that still has some "space" at the end.
53
54 :param cuts: a list of cuts to pack.
55 :param gap: the duration of silence inserted between concatenated cuts.
56 :param max_duration: the maximum duration for the concatenated cuts
57 (by default set to the duration of the first cut).
58 :return a list of packed cuts.
59 """
60 if len(cuts) <= 1:
61 # Nothing to do.
62 return CutSet.from_cuts(cuts)
63 cuts = sorted(cuts, key=lambda c: c.duration, reverse=True)
64 max_duration = cuts[0].duration if max_duration is None else max_duration
65 current_idx = 1
66 while True:
67 can_fit = False
68 shortest = cuts[-1]
69 for idx in range(current_idx, len(cuts) - 1):
70 cut = cuts[current_idx]
71 can_fit = cut.duration + gap + shortest.duration <= max_duration
72 if can_fit:
73 cuts[current_idx] = cut.pad(cut.duration + gap).append(shortest)
74 cuts = cuts[:-1]
75 break
76 current_idx += 1
77 if not can_fit:
78 break
79 return CutSet.from_cuts(cuts)
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lhotse/dataset/cut_transforms/concatenate.py b/lhotse/dataset/cut_transforms/concatenate.py
--- a/lhotse/dataset/cut_transforms/concatenate.py
+++ b/lhotse/dataset/cut_transforms/concatenate.py
@@ -62,7 +62,7 @@
         return CutSet.from_cuts(cuts)
     cuts = sorted(cuts, key=lambda c: c.duration, reverse=True)
     max_duration = cuts[0].duration if max_duration is None else max_duration
-    current_idx = 1
+    current_idx = 0
     while True:
         can_fit = False
         shortest = cuts[-1]
| {"golden_diff": "diff --git a/lhotse/dataset/cut_transforms/concatenate.py b/lhotse/dataset/cut_transforms/concatenate.py\n--- a/lhotse/dataset/cut_transforms/concatenate.py\n+++ b/lhotse/dataset/cut_transforms/concatenate.py\n@@ -62,7 +62,7 @@\n return CutSet.from_cuts(cuts)\n cuts = sorted(cuts, key=lambda c: c.duration, reverse=True)\n max_duration = cuts[0].duration if max_duration is None else max_duration\n- current_idx = 1\n+ current_idx = 0\n while True:\n can_fit = False\n shortest = cuts[-1]\n", "issue": "Cut concatenate doesn't consider the first sample in each batch\nFound in #234 \n", "before_files": [{"content": "from typing import Optional, Sequence\n\nfrom lhotse import CutSet\nfrom lhotse.cut import AnyCut\nfrom lhotse.utils import Seconds\n\n\nclass CutConcatenate:\n \"\"\"\n A transform on batch of cuts (``CutSet``) that concatenates the cuts to minimize the total amount of padding;\n e.g. instead of creating a batch with 40 examples, we will merge some of the examples together\n adding some silence between them to avoid a large number of padding frames that waste the computation.\n \"\"\"\n\n def __init__(\n self,\n gap: Seconds = 1.0,\n duration_factor: float = 1.0\n ) -> None:\n \"\"\"\n CutConcatenate's constructor.\n\n :param gap: The duration of silence in seconds that is inserted between the cuts;\n it's goal is to let the model \"know\" that there are separate utterances in a single example.\n :param duration_factor: Determines the maximum duration of the concatenated cuts;\n by default it's 1, setting the limit at the duration of the longest cut in the batch.\n \"\"\"\n self.gap = gap\n self.duration_factor = duration_factor\n\n def __call__(self, cuts: CutSet) -> CutSet:\n cuts = cuts.sort_by_duration(ascending=False)\n return concat_cuts(\n cuts,\n gap=self.gap,\n max_duration=cuts[0].duration * self.duration_factor\n )\n\n\ndef concat_cuts(\n cuts: Sequence[AnyCut],\n gap: Seconds = 1.0,\n max_duration: Optional[Seconds] = None\n) -> CutSet:\n \"\"\"\n We're going to concatenate the cuts to minimize the amount of total padding frames used.\n This means that some samples in the batch will be merged together into one sample,\n separated by an interval of silence.\n This is actually solving a knapsack problem.\n In this initial implementation we're using a greedy approach:\n going from the back (i.e. 
the shortest cuts) we'll try to concat them to the longest cut\n that still has some \"space\" at the end.\n\n :param cuts: a list of cuts to pack.\n :param gap: the duration of silence inserted between concatenated cuts.\n :param max_duration: the maximum duration for the concatenated cuts\n (by default set to the duration of the first cut).\n :return a list of packed cuts.\n \"\"\"\n if len(cuts) <= 1:\n # Nothing to do.\n return CutSet.from_cuts(cuts)\n cuts = sorted(cuts, key=lambda c: c.duration, reverse=True)\n max_duration = cuts[0].duration if max_duration is None else max_duration\n current_idx = 1\n while True:\n can_fit = False\n shortest = cuts[-1]\n for idx in range(current_idx, len(cuts) - 1):\n cut = cuts[current_idx]\n can_fit = cut.duration + gap + shortest.duration <= max_duration\n if can_fit:\n cuts[current_idx] = cut.pad(cut.duration + gap).append(shortest)\n cuts = cuts[:-1]\n break\n current_idx += 1\n if not can_fit:\n break\n return CutSet.from_cuts(cuts)\n", "path": "lhotse/dataset/cut_transforms/concatenate.py"}], "after_files": [{"content": "from typing import Optional, Sequence\n\nfrom lhotse import CutSet\nfrom lhotse.cut import AnyCut\nfrom lhotse.utils import Seconds\n\n\nclass CutConcatenate:\n \"\"\"\n A transform on batch of cuts (``CutSet``) that concatenates the cuts to minimize the total amount of padding;\n e.g. instead of creating a batch with 40 examples, we will merge some of the examples together\n adding some silence between them to avoid a large number of padding frames that waste the computation.\n \"\"\"\n\n def __init__(\n self,\n gap: Seconds = 1.0,\n duration_factor: float = 1.0\n ) -> None:\n \"\"\"\n CutConcatenate's constructor.\n\n :param gap: The duration of silence in seconds that is inserted between the cuts;\n it's goal is to let the model \"know\" that there are separate utterances in a single example.\n :param duration_factor: Determines the maximum duration of the concatenated cuts;\n by default it's 1, setting the limit at the duration of the longest cut in the batch.\n \"\"\"\n self.gap = gap\n self.duration_factor = duration_factor\n\n def __call__(self, cuts: CutSet) -> CutSet:\n cuts = cuts.sort_by_duration(ascending=False)\n return concat_cuts(\n cuts,\n gap=self.gap,\n max_duration=cuts[0].duration * self.duration_factor\n )\n\n\ndef concat_cuts(\n cuts: Sequence[AnyCut],\n gap: Seconds = 1.0,\n max_duration: Optional[Seconds] = None\n) -> CutSet:\n \"\"\"\n We're going to concatenate the cuts to minimize the amount of total padding frames used.\n This means that some samples in the batch will be merged together into one sample,\n separated by an interval of silence.\n This is actually solving a knapsack problem.\n In this initial implementation we're using a greedy approach:\n going from the back (i.e. 
the shortest cuts) we'll try to concat them to the longest cut\n that still has some \"space\" at the end.\n\n :param cuts: a list of cuts to pack.\n :param gap: the duration of silence inserted between concatenated cuts.\n :param max_duration: the maximum duration for the concatenated cuts\n (by default set to the duration of the first cut).\n :return a list of packed cuts.\n \"\"\"\n if len(cuts) <= 1:\n # Nothing to do.\n return CutSet.from_cuts(cuts)\n cuts = sorted(cuts, key=lambda c: c.duration, reverse=True)\n max_duration = cuts[0].duration if max_duration is None else max_duration\n current_idx = 0\n while True:\n can_fit = False\n shortest = cuts[-1]\n for idx in range(current_idx, len(cuts) - 1):\n cut = cuts[current_idx]\n can_fit = cut.duration + gap + shortest.duration <= max_duration\n if can_fit:\n cuts[current_idx] = cut.pad(cut.duration + gap).append(shortest)\n cuts = cuts[:-1]\n break\n current_idx += 1\n if not can_fit:\n break\n return CutSet.from_cuts(cuts)\n", "path": "lhotse/dataset/cut_transforms/concatenate.py"}]} | 1,140 | 154 |
gh_patches_debug_34601 | rasdani/github-patches | git_diff | sunpy__sunpy-7316 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Resampling Maps in the example gallery gives a confusing example for the superpixel method
### Provide a general description of the issue or problem.
This is perhaps a minor thing, but while checking this [page](https://docs.sunpy.org/en/stable/generated/gallery/map/map_resampling_and_superpixels.html) I got confused by the example for the superpixel method.
It says:
`new_dimensions = u.Quantity(aia_map.dimensions) / 16`
`aia_superpixel_map = aia_map.superpixel(new_dimensions)`
The first line should instead be, e.g.:
`new_dimensions = [16, 16] * u.pixel`
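To make the distinction concrete, a short sketch of the two calls (based on the gallery file quoted below and the eventual fix): `resample` takes the target map dimensions, while `superpixel` takes the size of each superpixel in original pixels.
```python
import astropy.units as u

import sunpy.data.sample
import sunpy.map

aia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)

# resample: pass the desired *map dimensions* in pixels.
aia_resampled_map = aia_map.resample([40, 40] * u.pixel)

# superpixel: pass the *size of each superpixel*; 16x16 bins reduce the resolution by 16.
aia_superpixel_map = aia_map.superpixel([16, 16] * u.pixel)
```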
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/map/map_resampling_and_superpixels.py`
Content:
```
1 """
2 ===============
3 Resampling Maps
4 ===============
5
6 How to resample a map using the resample method, which implements interpolation, or
7 using superpixels, which combines pixels.
8 """
9 import matplotlib.pyplot as plt
10
11 import astropy.units as u
12
13 import sunpy.data.sample
14 import sunpy.map
15
16 ###############################################################################
17 # We start with the sample data.
18
19 aia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)
20
21 ##############################################################################
22 # To reduce the angular resolution of the map you can use the `~sunpy.map.GenericMap.resample` method,
23 # specifying the new dimensions in pixels. By default, this method uses linear interpolation
24 # but this can be changed with the ``method`` argument ('nearest', 'linear' or 'spline').
25
26 new_dimensions = [40, 40] * u.pixel
27 aia_resampled_map = aia_map.resample(new_dimensions)
28
29 ##############################################################################
30 # Let's plot the result.
31
32 fig = plt.figure()
33 ax = fig.add_subplot(projection=aia_resampled_map)
34 aia_resampled_map.plot(axes=ax)
35 plt.show()
36
37 ##############################################################################
38 # Another way to resample is by using the `~sunpy.map.GenericMap.superpixel` method.
39 # This can be used to increase the signal to noise ratio by reducing the
40 # resolution of the image by combining pixels. This means that the new dimension
41 # must divide the original size exactly.
42 # For example you can reduce the AIA map resolution by a factor of 16.
43
44 new_dimensions = u.Quantity(aia_map.dimensions) / 16
45 aia_superpixel_map = aia_map.superpixel(new_dimensions)
46
47 ##############################################################################
48 # Let's plot the result.
49
50 fig = plt.figure()
51 ax = fig.add_subplot(projection=aia_superpixel_map)
52 aia_superpixel_map.plot(axes=ax)
53 plt.show()
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/map/map_resampling_and_superpixels.py b/examples/map/map_resampling_and_superpixels.py
--- a/examples/map/map_resampling_and_superpixels.py
+++ b/examples/map/map_resampling_and_superpixels.py
@@ -13,15 +13,16 @@
import sunpy.data.sample
import sunpy.map
-###############################################################################
+##############################################################################
# We start with the sample data.
aia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)
##############################################################################
-# To reduce the angular resolution of the map you can use the `~sunpy.map.GenericMap.resample` method,
-# specifying the new dimensions in pixels. By default, this method uses linear interpolation
-# but this can be changed with the ``method`` argument ('nearest', 'linear' or 'spline').
+# To reduce the angular resolution of the map, you can use the
+# :meth:`~sunpy.map.GenericMap.resample` method, specifying the new dimensions
+# in pixels. By default, this method uses linear interpolation but this can be
+# changed with the ``method`` argument ('nearest', 'linear' or 'spline').
new_dimensions = [40, 40] * u.pixel
aia_resampled_map = aia_map.resample(new_dimensions)
@@ -35,14 +36,15 @@
plt.show()
##############################################################################
-# Another way to resample is by using the `~sunpy.map.GenericMap.superpixel` method.
-# This can be used to increase the signal to noise ratio by reducing the
-# resolution of the image by combining pixels. This means that the new dimension
-# must divide the original size exactly.
-# For example you can reduce the AIA map resolution by a factor of 16.
-
-new_dimensions = u.Quantity(aia_map.dimensions) / 16
-aia_superpixel_map = aia_map.superpixel(new_dimensions)
+# Another way to reduce the angular resolution of the map is by using the
+# :meth:`~sunpy.map.GenericMap.superpixel` method, which combines pixels.
+# The superpixel dimensions do not need to be square, and the intensity of
+# each superpixel defaults to the sum of the constituent pixels. For example,
+# you can reduce the AIA map resolution by a factor of 16 by specifying 16x16
+# superpixels.
+
+superpixel_size = [16, 16] * u.pixel
+aia_superpixel_map = aia_map.superpixel(superpixel_size)
##############################################################################
# Let's plot the result.
| {"golden_diff": "diff --git a/examples/map/map_resampling_and_superpixels.py b/examples/map/map_resampling_and_superpixels.py\n--- a/examples/map/map_resampling_and_superpixels.py\n+++ b/examples/map/map_resampling_and_superpixels.py\n@@ -13,15 +13,16 @@\n import sunpy.data.sample\n import sunpy.map\n \n-###############################################################################\n+##############################################################################\n # We start with the sample data.\n \n aia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)\n \n ##############################################################################\n-# To reduce the angular resolution of the map you can use the `~sunpy.map.GenericMap.resample` method,\n-# specifying the new dimensions in pixels. By default, this method uses linear interpolation\n-# but this can be changed with the ``method`` argument ('nearest', 'linear' or 'spline').\n+# To reduce the angular resolution of the map, you can use the\n+# :meth:`~sunpy.map.GenericMap.resample` method, specifying the new dimensions\n+# in pixels. By default, this method uses linear interpolation but this can be\n+# changed with the ``method`` argument ('nearest', 'linear' or 'spline').\n \n new_dimensions = [40, 40] * u.pixel\n aia_resampled_map = aia_map.resample(new_dimensions)\n@@ -35,14 +36,15 @@\n plt.show()\n \n ##############################################################################\n-# Another way to resample is by using the `~sunpy.map.GenericMap.superpixel` method.\n-# This can be used to increase the signal to noise ratio by reducing the\n-# resolution of the image by combining pixels. This means that the new dimension\n-# must divide the original size exactly.\n-# For example you can reduce the AIA map resolution by a factor of 16.\n-\n-new_dimensions = u.Quantity(aia_map.dimensions) / 16\n-aia_superpixel_map = aia_map.superpixel(new_dimensions)\n+# Another way to reduce the angular resolution of the map is by using the\n+# :meth:`~sunpy.map.GenericMap.superpixel` method, which combines pixels.\n+# The superpixel dimensions do not need to be square, and the intensity of\n+# each superpixel defaults to the sum of the constituent pixels. For example,\n+# you can reduce the AIA map resolution by a factor of 16 by specifying 16x16\n+# superpixels.\n+\n+superpixel_size = [16, 16] * u.pixel\n+aia_superpixel_map = aia_map.superpixel(superpixel_size)\n \n ##############################################################################\n # Let's plot the result.\n", "issue": "Resampling Maps in the example gallery gives a confusing example for the superpixel method\n### Provide a general description of the issue or problem.\n\nThat's a minor thing perhaps but checking this [page](https://docs.sunpy.org/en/stable/generated/gallery/map/map_resampling_and_superpixels.html) I got confused by the example for the superpixel method. 
\r\nIt says:\r\n`new_dimensions = u.Quantity(aia_map.dimensions) / 16`\r\n`aia_superpixel_map = aia_map.superpixel([new_dimensions]`\r\n\r\nThe first line should be instead e.g.:\r\n`new_dimensions=[16,16]*u.pixel `\n", "before_files": [{"content": "\"\"\"\n===============\nResampling Maps\n===============\n\nHow to resample a map using the resample method, which implements interpolation, or\nusing superpixels, which combines pixels.\n\"\"\"\nimport matplotlib.pyplot as plt\n\nimport astropy.units as u\n\nimport sunpy.data.sample\nimport sunpy.map\n\n###############################################################################\n# We start with the sample data.\n\naia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)\n\n##############################################################################\n# To reduce the angular resolution of the map you can use the `~sunpy.map.GenericMap.resample` method,\n# specifying the new dimensions in pixels. By default, this method uses linear interpolation\n# but this can be changed with the ``method`` argument ('nearest', 'linear' or 'spline').\n\nnew_dimensions = [40, 40] * u.pixel\naia_resampled_map = aia_map.resample(new_dimensions)\n\n##############################################################################\n# Let's plot the result.\n\nfig = plt.figure()\nax = fig.add_subplot(projection=aia_resampled_map)\naia_resampled_map.plot(axes=ax)\nplt.show()\n\n##############################################################################\n# Another way to resample is by using the `~sunpy.map.GenericMap.superpixel` method.\n# This can be used to increase the signal to noise ratio by reducing the\n# resolution of the image by combining pixels. This means that the new dimension\n# must divide the original size exactly.\n# For example you can reduce the AIA map resolution by a factor of 16.\n\nnew_dimensions = u.Quantity(aia_map.dimensions) / 16\naia_superpixel_map = aia_map.superpixel(new_dimensions)\n\n##############################################################################\n# Let's plot the result.\n\nfig = plt.figure()\nax = fig.add_subplot(projection=aia_superpixel_map)\naia_superpixel_map.plot(axes=ax)\nplt.show()\n", "path": "examples/map/map_resampling_and_superpixels.py"}], "after_files": [{"content": "\"\"\"\n===============\nResampling Maps\n===============\n\nHow to resample a map using the resample method, which implements interpolation, or\nusing superpixels, which combines pixels.\n\"\"\"\nimport matplotlib.pyplot as plt\n\nimport astropy.units as u\n\nimport sunpy.data.sample\nimport sunpy.map\n\n##############################################################################\n# We start with the sample data.\n\naia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)\n\n##############################################################################\n# To reduce the angular resolution of the map, you can use the\n# :meth:`~sunpy.map.GenericMap.resample` method, specifying the new dimensions\n# in pixels. 
By default, this method uses linear interpolation but this can be\n# changed with the ``method`` argument ('nearest', 'linear' or 'spline').\n\nnew_dimensions = [40, 40] * u.pixel\naia_resampled_map = aia_map.resample(new_dimensions)\n\n##############################################################################\n# Let's plot the result.\n\nfig = plt.figure()\nax = fig.add_subplot(projection=aia_resampled_map)\naia_resampled_map.plot(axes=ax)\nplt.show()\n\n##############################################################################\n# Another way to reduce the angular resolution of the map is by using the\n# :meth:`~sunpy.map.GenericMap.superpixel` method, which combines pixels.\n# The superpixel dimensions do not need to be square, and the intensity of\n# each superpixel defaults to the sum of the constituent pixels. For example,\n# you can reduce the AIA map resolution by a factor of 16 by specifying 16x16\n# superpixels.\n\nsuperpixel_size = [16, 16] * u.pixel\naia_superpixel_map = aia_map.superpixel(superpixel_size)\n\n##############################################################################\n# Let's plot the result.\n\nfig = plt.figure()\nax = fig.add_subplot(projection=aia_superpixel_map)\naia_superpixel_map.plot(axes=ax)\nplt.show()\n", "path": "examples/map/map_resampling_and_superpixels.py"}]} | 888 | 554 |
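A minimal sketch to sanity-check the corrected `superpixel` usage shown in the golden diff above, assuming sunpy (with its sample data) and astropy are installed; the map name and the printout are illustrative only:

```python
# Sanity check of the corrected superpixel call from the golden diff.
import astropy.units as u

import sunpy.data.sample
import sunpy.map

# Load the same AIA 171 sample image used in the example.
aia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)

# 16x16-pixel superpixels; the 1024x1024 AIA sample image divides exactly.
superpixel_size = [16, 16] * u.pixel
aia_superpixel_map = aia_map.superpixel(superpixel_size)

# Expect 64x64 pixels in the resulting map.
print(aia_superpixel_map.dimensions)
```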